/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
 *          Fabien Dessenne <fabien.dessenne@st.com>
 *          for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */
10 #include <linux/dma-mapping.h>
12 #include "sti_compositor.h"
14 #include "sti_layer.h"
/* GDP control (CTL) register bits */
#define ENA_COLOR_FILL BIT(8)
#define WAIT_NEXT_VSYNC BIT(31)

/* GDP color formats */
#define GDP_RGB565 0x00
#define GDP_RGB888 0x01
#define GDP_RGB888_32 0x02
#define GDP_ARGB8565 0x04
#define GDP_ARGB8888 0x05
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
#define GDP_CLUT8 0x0B
#define GDP_YCBR888 0x10
#define GDP_YCBR422R 0x12
#define GDP_AYCBR8888 0x15

/* GDP register offsets, relative to the GDP instance base address */
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
#define GAM_GDP_VPO_OFFSET 0x0C
#define GAM_GDP_VPS_OFFSET 0x10
#define GAM_GDP_PML_OFFSET 0x14
#define GAM_GDP_PMP_OFFSET 0x18
#define GAM_GDP_SIZE_OFFSET 0x1C
#define GAM_GDP_NVN_OFFSET 0x24
#define GAM_GDP_KEY1_OFFSET 0x28
#define GAM_GDP_KEY2_OFFSET 0x2C
#define GAM_GDP_PPT_OFFSET 0x34
#define GAM_GDP_CML_OFFSET 0x3C
#define GAM_GDP_MST_OFFSET 0x68

/* ORed into CTL to select the full 0-255 alpha range */
#define GAM_GDP_ALPHARANGE_255 BIT(5)
/* Default gain/offset value programmed in the AGC node field */
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
/* When set in PPT, the node is ignored on the mixer (layer disabled) */
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
/* SIZE register width/height fields are 11 bits wide */
#define GAM_GDP_SIZE_MAX 0x7FF

/* Two node banks, each made of one top-field and one bottom-field node */
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
74 struct sti_gdp_node_list {
75 struct sti_gdp_node *top_field;
76 dma_addr_t top_field_paddr;
77 struct sti_gdp_node *btm_field;
78 dma_addr_t btm_field_paddr;
84 * @layer: layer structure
85 * @clk_pix: pixel clock for the current gdp
86 * @clk_main_parent: gdp parent clock if main path used
87 * @clk_aux_parent: gdp parent clock if aux path used
88 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
89 * @is_curr_top: true if the current node processed is the top field
90 * @node_list: array of node list
93 struct sti_layer layer;
95 struct clk *clk_main_parent;
96 struct clk *clk_aux_parent;
97 struct notifier_block vtg_field_nb;
99 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
/* Retrieve the sti_gdp from its embedded sti_layer member */
#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
104 static const uint32_t gdp_supported_formats[] = {
117 static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
119 return gdp_supported_formats;
122 static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
124 return ARRAY_SIZE(gdp_supported_formats);
127 static int sti_gdp_fourcc2format(int fourcc)
130 case DRM_FORMAT_XRGB8888:
131 return GDP_RGB888_32;
132 case DRM_FORMAT_ARGB8888:
134 case DRM_FORMAT_ARGB4444:
136 case DRM_FORMAT_ARGB1555:
138 case DRM_FORMAT_RGB565:
140 case DRM_FORMAT_RGB888:
142 case DRM_FORMAT_AYUV:
143 return GDP_AYCBR8888;
144 case DRM_FORMAT_YUV444:
146 case DRM_FORMAT_VYUY:
154 static int sti_gdp_get_alpharange(int format)
160 return GAM_GDP_ALPHARANGE_255;
/**
 * sti_gdp_get_free_nodes
 * @layer: gdp layer
 *
 * Look for a GDP node list that is not currently read by the HW.
 *
 * RETURNS:
 * Pointer to the free GDP node list
 */
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
/* NOTE(review): declarations of hw_nvn and i are missing in this extract */
struct sti_gdp *gdp = to_sti_gdp(layer);

/* NVN holds the DMA address of the node currently used by the HW */
hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);

/* A bank is free when neither of its two nodes is pointed to by NVN */
for (i = 0; i < GDP_NODE_NB_BANK; i++)
	if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
	    (hw_nvn != gdp->node_list[i].top_field_paddr))
		return &gdp->node_list[i];

/* in hazardious cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
	  sti_layer_to_str(layer), hw_nvn);

return &gdp->node_list[0];
/**
 * sti_gdp_get_current_nodes
 * @layer: gdp layer
 *
 * Look for GDP nodes that are currently read by the HW.
 *
 * RETURNS:
 * Pointer to the current GDP node list
 */
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
/* NOTE(review): declarations of hw_nvn and i are missing in this extract */
struct sti_gdp *gdp = to_sti_gdp(layer);

hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);

/* The current bank is the one whose node DMA address matches NVN */
for (i = 0; i < GDP_NODE_NB_BANK; i++)
	if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
	    (hw_nvn == gdp->node_list[i].top_field_paddr))
		return &gdp->node_list[i];

/* NOTE(review): the final "return NULL" is not visible here; the caller
 * in sti_gdp_commit_layer() treats a NULL return as "first update or
 * invalid node" -- confirm this path does return NULL */
DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
		 hw_nvn, sti_layer_to_str(layer));
/**
 * sti_gdp_prepare_layer
 * @layer: gdp layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties.
 *
 * NOTE(review): several lines (local declarations such as format/res/clkp,
 * error returns, else branches, closing braces) are missing in this
 * extract; the control flow below is incomplete as shown.
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
struct sti_gdp_node_list *list;
struct sti_gdp_node *top_field, *btm_field;
struct drm_display_mode *mode = layer->mode;
struct device *dev = layer->dev;
struct sti_gdp *gdp = to_sti_gdp(layer);
struct sti_compositor *compo = dev_get_drvdata(dev);
unsigned int depth, bpp;
int rate = mode->clock * 1000;	/* mode->clock is in kHz */
u32 ydo, xdo, yds, xds;

/* Pick a node bank that is not currently read by the HW */
list = sti_gdp_get_free_nodes(layer);
top_field = list->top_field;
btm_field = list->btm_field;

dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
	sti_layer_to_str(layer), top_field, btm_field);

/* Build the top field from layer params */
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(layer->format);
/* NOTE(review): the "format == -1" guard around this error appears to be
 * missing in this extract */
DRM_ERROR("Format not supported by GDP %.4s\n",
	  (char *)&layer->format);
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

/* pixel memory location */
drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

/* input parameters */
top_field->gam_gdp_pmp = layer->pitches[0];
top_field->gam_gdp_size =
	clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
	clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

/* output parameters: convert dst window to VTG line/pixel numbers */
ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;

/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
btm_field->gam_gdp_nvn = list->top_field_paddr;

/* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
	/* NOTE(review): right-hand-side continuation missing in extract */
	btm_field->gam_gdp_pml = top_field->gam_gdp_pml +

/* Register gdp callback */
if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
		compo->vtg_main : compo->vtg_aux,
		&gdp->vtg_field_nb, layer->mixer_id)) {
	DRM_ERROR("Cannot register VTG notifier\n");

/* Set and enable gdp clock */
/* According to the mixer used, the gdp pixel clock
 * should have a different parent clock. */
if (layer->mixer_id == STI_MIXER_MAIN)
	clkp = gdp->clk_main_parent;
/* NOTE(review): "else" branch marker missing in extract */
clkp = gdp->clk_aux_parent;

clk_set_parent(gdp->clk_pix, clkp);

res = clk_set_rate(gdp->clk_pix, rate);
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",

if (clk_prepare_enable(gdp->clk_pix)) {
	DRM_ERROR("Failed to prepare/enable gdp\n");
/**
 * sti_gdp_commit_layer
 * @layer: gdp layer
 *
 * Update the NVN field of the 'right' field of the current GDP node (being
 * used by the HW) with the address of the updated ('free') top field GDP node.
 * - In interlaced mode the 'right' field is the bottom field as we update
 *   frames starting from their top field
 * - In progressive mode, we update both bottom and top fields which are
 *   equal nodes.
 * At the next VSYNC, the updated node list will be used by the HW.
 */
static int sti_gdp_commit_layer(struct sti_layer *layer)
struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer);
u32 dma_updated_top = updated_list->top_field_paddr;
u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);

dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
	sti_layer_to_str(layer),
	updated_top_node, updated_btm_node);
dev_dbg(layer->dev, "Current NVN:0x%X\n",
	readl(layer->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
	(unsigned long)layer->paddr,
	readl(layer->regs + GAM_GDP_PML_OFFSET));

if (curr_list == NULL) {
	/* First update or invalid node: write straight into the HW NVN
	 * register, picking the field matching the current VTG field */
	DRM_DEBUG_DRIVER("%s first update (or invalid node)",
			 sti_layer_to_str(layer));

	writel(gdp->is_curr_top == true ?
		dma_updated_btm : dma_updated_top,
		layer->regs + GAM_GDP_NVN_OFFSET);

if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
	if (gdp->is_curr_top == true) {
		/* Do not update in the middle of the frame, but
		 * postpone the update after the bottom field has
		 * been displayed */
		curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
	/* NOTE(review): "else" branch marker missing in extract */
	/* Direct update to avoid one frame delay */
	writel(dma_updated_top,
	       layer->regs + GAM_GDP_NVN_OFFSET);

/* Direct update for progressive to avoid one frame delay */
writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
/**
 * sti_gdp_disable_layer
 * @layer: gdp layer
 *
 * Disable a GDP layer: mark every node of every bank as "to be ignored
 * on mixer", unregister from the VTG notifier and gate the gdp pixel
 * clock that was enabled in sti_gdp_prepare_layer().
 */
static int sti_gdp_disable_layer(struct sti_layer *layer)
/* NOTE(review): declaration of loop index i missing in this extract */
struct sti_gdp *gdp = to_sti_gdp(layer);
struct sti_compositor *compo = dev_get_drvdata(layer->dev);

DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));

/* Set the nodes as 'to be ignored on mixer' */
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
	gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
	gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;

if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
		compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
	DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");

clk_disable_unprepare(gdp->clk_pix);
442 * @nb: notifier block
443 * @event: event message
444 * @data: private data
446 * Handle VTG top field and bottom field event.
451 int sti_gdp_field_cb(struct notifier_block *nb,
452 unsigned long event, void *data)
454 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
457 case VTG_TOP_FIELD_EVENT:
458 gdp->is_curr_top = true;
460 case VTG_BOTTOM_FIELD_EVENT:
461 gdp->is_curr_top = false;
464 DRM_ERROR("unsupported event: %lu\n", event);
/**
 * sti_gdp_init
 * @layer: gdp layer
 *
 * Allocate and register the DMA node banks and, on STiH407, retrieve the
 * gdp pixel clock and its main/aux parent clocks.
 *
 * NOTE(review): several lines (declarations of base/dma_addr/clk_name,
 * NULL check on the allocation, switch case labels, returns, closing
 * braces) are missing in this extract.
 */
static void sti_gdp_init(struct sti_layer *layer)
struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node;
unsigned int i, size;

/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
	GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
base = dma_alloc_writecombine(layer->dev,
		size, &dma_addr, GFP_KERNEL | GFP_DMA);
/* NOTE(review): "if (!base)" guard appears to be missing in extract */
DRM_ERROR("Failed to allocate memory for GDP node\n");
memset(base, 0, size);

for (i = 0; i < GDP_NODE_NB_BANK; i++) {
	/* The hardware requires 16-byte aligned nodes */
	if (dma_addr & 0xF) {
		DRM_ERROR("Mem alignment failed\n");
	gdp->node_list[i].top_field = base;
	gdp->node_list[i].top_field_paddr = dma_addr;

	DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
	base += sizeof(struct sti_gdp_node);
	dma_addr += sizeof(struct sti_gdp_node);

	if (dma_addr & 0xF) {
		DRM_ERROR("Mem alignment failed\n");
	gdp->node_list[i].btm_field = base;
	gdp->node_list[i].btm_field_paddr = dma_addr;
	DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
	base += sizeof(struct sti_gdp_node);
	dma_addr += sizeof(struct sti_gdp_node);

if (of_device_is_compatible(np, "st,stih407-compositor")) {
	/* GDP of STiH407 chip have its own pixel clock */
	/* NOTE(review): the case labels for layer->desc are missing in
	 * this extract; each assignment below belongs to one GDP id */
	switch (layer->desc) {
	clk_name = "pix_gdp1";
	clk_name = "pix_gdp2";
	clk_name = "pix_gdp3";
	clk_name = "pix_gdp4";
	DRM_ERROR("GDP id not recognized\n");

	gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
	if (IS_ERR(gdp->clk_pix))
		DRM_ERROR("Cannot get %s clock\n", clk_name);

	gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
	if (IS_ERR(gdp->clk_main_parent))
		DRM_ERROR("Cannot get main_parent clock\n");

	gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
	if (IS_ERR(gdp->clk_aux_parent))
		DRM_ERROR("Cannot get aux_parent clock\n");
550 static const struct sti_layer_funcs gdp_ops = {
551 .get_formats = sti_gdp_get_formats,
552 .get_nb_formats = sti_gdp_get_nb_formats,
553 .init = sti_gdp_init,
554 .prepare = sti_gdp_prepare_layer,
555 .commit = sti_gdp_commit_layer,
556 .disable = sti_gdp_disable_layer,
559 struct sti_layer *sti_gdp_create(struct device *dev, int id)
563 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
565 DRM_ERROR("Failed to allocate memory for GDP\n");
569 gdp->layer.ops = &gdp_ops;
570 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
572 return (struct sti_layer *)gdp;