drm/msm/dsi: Set up link clocks for DSIv2
drivers/gpu/drm/msm/dsi/dsi_host.c
1/*
2 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/gpio.h>
 18#include <linux/gpio/consumer.h>
19#include <linux/interrupt.h>
20#include <linux/of_device.h>
21#include <linux/of_gpio.h>
22#include <linux/of_irq.h>
 23#include <linux/pinctrl/consumer.h>
 24#include <linux/of_graph.h>
25#include <linux/regulator/consumer.h>
26#include <linux/spinlock.h>
27#include <video/mipi_display.h>
28
29#include "dsi.h"
30#include "dsi.xml.h"
 31#include "dsi_cfg.h"
32
33static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
34{
35 u32 ver;
36
37 if (!major || !minor)
38 return -EINVAL;
39
40 /*
41 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
 42 * makes all other registers 4-byte shifted down.
43 *
 44 * In order to distinguish between DSI6G(v3) and beyond, and DSIv2 and
 45 * older, we read the DSI_VERSION register without any shift (offset
 46 * 0x1f0). In the case of DSIv2, this has to be a non-zero value. In
47 * the case of DSI6G, this has to be zero (the offset points to a
48 * scratch register which we never touch)
 49 */
50
51 ver = msm_readl(base + REG_DSI_VERSION);
52 if (ver) {
53 /* older dsi host, there is no register shift */
54 ver = FIELD(ver, DSI_VERSION_MAJOR);
55 if (ver <= MSM_DSI_VER_MAJOR_V2) {
56 /* old versions */
57 *major = ver;
58 *minor = 0;
59 return 0;
60 } else {
61 return -EINVAL;
62 }
63 } else {
64 /*
65 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
66 * registers are shifted down, read DSI_VERSION again with
67 * the shifted offset
68 */
69 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
70 ver = FIELD(ver, DSI_VERSION_MAJOR);
71 if (ver == MSM_DSI_VER_MAJOR_6G) {
72 /* 6G version */
73 *major = ver;
 74 *minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
75 return 0;
76 } else {
77 return -EINVAL;
78 }
79 }
80}
81
82#define DSI_ERR_STATE_ACK 0x0000
83#define DSI_ERR_STATE_TIMEOUT 0x0001
84#define DSI_ERR_STATE_DLN0_PHY 0x0002
85#define DSI_ERR_STATE_FIFO 0x0004
86#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
87#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
88#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020
89
90#define DSI_CLK_CTRL_ENABLE_CLKS \
91 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
92 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
93 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
94 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
95
96struct msm_dsi_host {
97 struct mipi_dsi_host base;
98
99 struct platform_device *pdev;
100 struct drm_device *dev;
101
102 int id;
103
104 void __iomem *ctrl_base;
 105 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
106
107 struct clk *bus_clks[DSI_BUS_CLK_MAX];
108
109 struct clk *byte_clk;
110 struct clk *esc_clk;
111 struct clk *pixel_clk;
112 struct clk *byte_clk_src;
113 struct clk *pixel_clk_src;
114
 115 u32 byte_clk_rate;
116 u32 esc_clk_rate;
117
118 /* DSI v2 specific clocks */
119 struct clk *src_clk;
120 struct clk *esc_clk_src;
121 struct clk *dsi_clk_src;
122
123 u32 src_clk_rate;
124
125 struct gpio_desc *disp_en_gpio;
126 struct gpio_desc *te_gpio;
127
 128 const struct msm_dsi_cfg_handler *cfg_hnd;
129
130 struct completion dma_comp;
131 struct completion video_comp;
132 struct mutex dev_mutex;
133 struct mutex cmd_mutex;
134 struct mutex clk_mutex;
135 spinlock_t intr_lock; /* Protect interrupt ctrl register */
136
137 u32 err_work_state;
138 struct work_struct err_work;
139 struct workqueue_struct *workqueue;
140
141 struct drm_gem_object *tx_gem_obj;
142 u8 *rx_buf;
143
144 struct drm_display_mode *mode;
145
146 /* connected device info */
147 struct device_node *device_node;
148 unsigned int channel;
149 unsigned int lanes;
150 enum mipi_dsi_pixel_format format;
151 unsigned long mode_flags;
152
153 u32 dma_cmd_ctrl_restore;
154
155 bool registered;
156 bool power_on;
157 int irq;
158};
159
160static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
161{
162 switch (fmt) {
163 case MIPI_DSI_FMT_RGB565: return 16;
164 case MIPI_DSI_FMT_RGB666_PACKED: return 18;
165 case MIPI_DSI_FMT_RGB666:
166 case MIPI_DSI_FMT_RGB888:
167 default: return 24;
168 }
169}
170
171static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
172{
 173 return msm_readl(msm_host->ctrl_base + reg);
174}
175static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
176{
 177 msm_writel(data, msm_host->ctrl_base + reg);
178}
179
180static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
181static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
182
183static const struct msm_dsi_cfg_handler *dsi_get_config(
184 struct msm_dsi_host *msm_host)
 185{
 186 const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
 187 struct device *dev = &msm_host->pdev->dev;
 188 struct regulator *gdsc_reg;
 189 struct clk *ahb_clk;
 190 int ret;
191 u32 major = 0, minor = 0;
192
 193 gdsc_reg = regulator_get(dev, "gdsc");
 194 if (IS_ERR(gdsc_reg)) {
 195 pr_err("%s: cannot get gdsc\n", __func__);
 196 goto exit;
 197 }
198
199 ahb_clk = clk_get(dev, "iface_clk");
200 if (IS_ERR(ahb_clk)) {
201 pr_err("%s: cannot get interface clock\n", __func__);
202 goto put_gdsc;
203 }
204
205 ret = regulator_enable(gdsc_reg);
206 if (ret) {
207 pr_err("%s: unable to enable gdsc\n", __func__);
 208 goto put_clk;
 209 }
210
211 ret = clk_prepare_enable(ahb_clk);
212 if (ret) {
213 pr_err("%s: unable to enable ahb_clk\n", __func__);
 214 goto disable_gdsc;
215 }
216
217 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
218 if (ret) {
219 pr_err("%s: Invalid version\n", __func__);
 220 goto disable_clks;
221 }
222
 223 cfg_hnd = msm_dsi_cfg_get(major, minor);
 224
225 DBG("%s: Version %x:%x\n", __func__, major, minor);
226
227disable_clks:
 228 clk_disable_unprepare(ahb_clk);
229disable_gdsc:
230 regulator_disable(gdsc_reg);
231put_clk:
232 clk_put(ahb_clk);
233put_gdsc:
234 regulator_put(gdsc_reg);
235exit:
236 return cfg_hnd;
237}
238
239static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
240{
241 return container_of(host, struct msm_dsi_host, base);
242}
243
244static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
245{
246 struct regulator_bulk_data *s = msm_host->supplies;
247 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
248 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
249 int i;
250
251 DBG("");
252 for (i = num - 1; i >= 0; i--)
253 if (regs[i].disable_load >= 0)
254 regulator_set_load(s[i].consumer,
255 regs[i].disable_load);
256
257 regulator_bulk_disable(num, s);
258}
259
260static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
261{
262 struct regulator_bulk_data *s = msm_host->supplies;
263 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
264 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
265 int ret, i;
266
267 DBG("");
268 for (i = 0; i < num; i++) {
269 if (regs[i].enable_load >= 0) {
270 ret = regulator_set_load(s[i].consumer,
271 regs[i].enable_load);
272 if (ret < 0) {
273 pr_err("regulator %d set op mode failed, %d\n",
274 i, ret);
275 goto fail;
276 }
277 }
278 }
279
280 ret = regulator_bulk_enable(num, s);
281 if (ret < 0) {
282 pr_err("regulator enable failed, %d\n", ret);
283 goto fail;
284 }
285
286 return 0;
287
288fail:
289 for (i--; i >= 0; i--)
 290 regulator_set_load(s[i].consumer, regs[i].disable_load);
291 return ret;
292}
293
294static int dsi_regulator_init(struct msm_dsi_host *msm_host)
295{
296 struct regulator_bulk_data *s = msm_host->supplies;
297 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
298 int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
299 int i, ret;
300
301 for (i = 0; i < num; i++)
302 s[i].supply = regs[i].name;
303
304 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
305 if (ret < 0) {
306 pr_err("%s: failed to init regulator, ret=%d\n",
307 __func__, ret);
308 return ret;
309 }
310
311 for (i = 0; i < num; i++) {
 312 if (regulator_can_change_voltage(s[i].consumer)) {
313 ret = regulator_set_voltage(s[i].consumer,
314 regs[i].min_voltage, regs[i].max_voltage);
315 if (ret < 0) {
316 pr_err("regulator %d set voltage failed, %d\n",
317 i, ret);
318 return ret;
319 }
320 }
321 }
322
323 return 0;
324}
325
326static int dsi_clk_init(struct msm_dsi_host *msm_host)
327{
328 struct device *dev = &msm_host->pdev->dev;
329 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
330 const struct msm_dsi_config *cfg = cfg_hnd->cfg;
331 int i, ret = 0;
332
333 /* get bus clocks */
334 for (i = 0; i < cfg->num_bus_clks; i++) {
335 msm_host->bus_clks[i] = devm_clk_get(dev,
336 cfg->bus_clk_names[i]);
337 if (IS_ERR(msm_host->bus_clks[i])) {
338 ret = PTR_ERR(msm_host->bus_clks[i]);
339 pr_err("%s: Unable to get %s, ret = %d\n",
340 __func__, cfg->bus_clk_names[i], ret);
341 goto exit;
342 }
343 }
344
 345 /* get link and source clocks */
346 msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
347 if (IS_ERR(msm_host->byte_clk)) {
348 ret = PTR_ERR(msm_host->byte_clk);
349 pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
350 __func__, ret);
351 msm_host->byte_clk = NULL;
352 goto exit;
353 }
354
355 msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
356 if (IS_ERR(msm_host->pixel_clk)) {
357 ret = PTR_ERR(msm_host->pixel_clk);
358 pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
359 __func__, ret);
360 msm_host->pixel_clk = NULL;
361 goto exit;
362 }
363
364 msm_host->esc_clk = devm_clk_get(dev, "core_clk");
365 if (IS_ERR(msm_host->esc_clk)) {
366 ret = PTR_ERR(msm_host->esc_clk);
367 pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
368 __func__, ret);
369 msm_host->esc_clk = NULL;
370 goto exit;
371 }
372
373 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
374 if (!msm_host->byte_clk_src) {
375 ret = -ENODEV;
 376 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
377 goto exit;
378 }
379
380 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
381 if (!msm_host->pixel_clk_src) {
382 ret = -ENODEV;
 383 pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
 384 goto exit;
385 }
386
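	/*
	 * Note (added for clarity): DSIv2 hosts route the pixel path through
	 * an extra "src_clk", so that clock and the parents of esc_clk and
	 * src_clk are looked up below in addition to the common link clocks.
	 */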
387 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
388 msm_host->src_clk = devm_clk_get(dev, "src_clk");
389 if (IS_ERR(msm_host->src_clk)) {
390 ret = PTR_ERR(msm_host->src_clk);
391 pr_err("%s: can't find dsi_src_clk. ret=%d\n",
392 __func__, ret);
393 msm_host->src_clk = NULL;
394 goto exit;
395 }
396
397 msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
398 if (!msm_host->esc_clk_src) {
399 ret = -ENODEV;
400 pr_err("%s: can't get esc_clk_src. ret=%d\n",
401 __func__, ret);
402 goto exit;
403 }
404
405 msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
406 if (!msm_host->dsi_clk_src) {
407 ret = -ENODEV;
408 pr_err("%s: can't get dsi_clk_src. ret=%d\n",
409 __func__, ret);
410 }
411 }
412exit:
413 return ret;
414}
415
416static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
417{
418 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
419 int i, ret;
420
421 DBG("id=%d", msm_host->id);
422
423 for (i = 0; i < cfg->num_bus_clks; i++) {
424 ret = clk_prepare_enable(msm_host->bus_clks[i]);
425 if (ret) {
426 pr_err("%s: failed to enable bus clock %d ret %d\n",
427 __func__, i, ret);
428 goto err;
429 }
430 }
431
432 return 0;
433err:
 434 while (--i >= 0)
435 clk_disable_unprepare(msm_host->bus_clks[i]);
 436
437 return ret;
438}
439
440static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
441{
442 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
443 int i;
444
 445 DBG("");
446
447 for (i = cfg->num_bus_clks - 1; i >= 0; i--)
448 clk_disable_unprepare(msm_host->bus_clks[i]);
449}
450
 451static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
452{
453 int ret;
454
455 DBG("Set clk rates: pclk=%d, byteclk=%d",
456 msm_host->mode->clock, msm_host->byte_clk_rate);
457
458 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
459 if (ret) {
460 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
461 goto error;
462 }
463
464 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
465 if (ret) {
466 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
467 goto error;
468 }
469
470 ret = clk_prepare_enable(msm_host->esc_clk);
471 if (ret) {
472 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
473 goto error;
474 }
475
476 ret = clk_prepare_enable(msm_host->byte_clk);
477 if (ret) {
478 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
479 goto byte_clk_err;
480 }
481
482 ret = clk_prepare_enable(msm_host->pixel_clk);
483 if (ret) {
484 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
485 goto pixel_clk_err;
486 }
487
488 return 0;
489
490pixel_clk_err:
491 clk_disable_unprepare(msm_host->byte_clk);
492byte_clk_err:
493 clk_disable_unprepare(msm_host->esc_clk);
494error:
495 return ret;
496}
497
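/*
 * DSIv2 hosts have an additional "src_clk" in the pixel path, so the v2
 * enable sequence below sets one more rate and enables one more clock
 * (byte, esc, src, pixel) than the 6G variant above.
 */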
 498static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 499{
500 int ret;
501
502 DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
503 msm_host->mode->clock, msm_host->byte_clk_rate,
504 msm_host->esc_clk_rate, msm_host->src_clk_rate);
505
506 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
507 if (ret) {
508 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
509 goto error;
510 }
511
512 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
513 if (ret) {
514 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
515 goto error;
516 }
517
518 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
519 if (ret) {
520 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
521 goto error;
522 }
523
524 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
525 if (ret) {
526 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
527 goto error;
528 }
529
530 ret = clk_prepare_enable(msm_host->byte_clk);
531 if (ret) {
532 pr_err("%s: Failed to enable dsi byte clk\n", __func__);
533 goto error;
534 }
535
536 ret = clk_prepare_enable(msm_host->esc_clk);
537 if (ret) {
538 pr_err("%s: Failed to enable dsi esc clk\n", __func__);
539 goto esc_clk_err;
540 }
541
542 ret = clk_prepare_enable(msm_host->src_clk);
543 if (ret) {
544 pr_err("%s: Failed to enable dsi src clk\n", __func__);
545 goto src_clk_err;
546 }
547
548 ret = clk_prepare_enable(msm_host->pixel_clk);
549 if (ret) {
550 pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
551 goto pixel_clk_err;
552 }
553
554 return 0;
555
556pixel_clk_err:
557 clk_disable_unprepare(msm_host->src_clk);
558src_clk_err:
 559 clk_disable_unprepare(msm_host->esc_clk);
 560esc_clk_err:
 561 clk_disable_unprepare(msm_host->byte_clk);
562error:
563 return ret;
564}
565
566static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
567{
568 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
569
570 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
571 return dsi_link_clk_enable_6g(msm_host);
572 else
573 return dsi_link_clk_enable_v2(msm_host);
574}
575
576static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
577{
578 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
579
580 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
581 clk_disable_unprepare(msm_host->esc_clk);
582 clk_disable_unprepare(msm_host->pixel_clk);
583 clk_disable_unprepare(msm_host->byte_clk);
584 } else {
585 clk_disable_unprepare(msm_host->pixel_clk);
586 clk_disable_unprepare(msm_host->src_clk);
587 clk_disable_unprepare(msm_host->esc_clk);
588 clk_disable_unprepare(msm_host->byte_clk);
589 }
590}
591
592static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
593{
594 int ret = 0;
595
596 mutex_lock(&msm_host->clk_mutex);
597 if (enable) {
598 ret = dsi_bus_clk_enable(msm_host);
599 if (ret) {
600 pr_err("%s: Can not enable bus clk, %d\n",
601 __func__, ret);
602 goto unlock_ret;
603 }
604 ret = dsi_link_clk_enable(msm_host);
605 if (ret) {
606 pr_err("%s: Can not enable link clk, %d\n",
607 __func__, ret);
608 dsi_bus_clk_disable(msm_host);
609 goto unlock_ret;
610 }
611 } else {
612 dsi_link_clk_disable(msm_host);
613 dsi_bus_clk_disable(msm_host);
614 }
615
616unlock_ret:
617 mutex_unlock(&msm_host->clk_mutex);
618 return ret;
619}
620
621static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
622{
623 struct drm_display_mode *mode = msm_host->mode;
 624 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
625 u8 lanes = msm_host->lanes;
626 u32 bpp = dsi_get_bpp(msm_host->format);
627 u32 pclk_rate;
628
629 if (!mode) {
630 pr_err("%s: mode not set\n", __func__);
631 return -EINVAL;
632 }
633
634 pclk_rate = mode->clock * 1000;
635 if (lanes > 0) {
636 msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
637 } else {
638 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
639 msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
640 }
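	/*
	 * Illustrative example (not from the original source): a 148500 kHz
	 * pixel clock at 24 bpp on 4 lanes gives a byte clock of
	 * 148500000 * 24 / (8 * 4) = 111.375 MHz.
	 */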
641
642 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
643
644 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
645
646 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
647 unsigned int esc_mhz, esc_div;
648 unsigned long byte_mhz;
649
650 msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
651
652 /*
653 * esc clock is byte clock followed by a 4 bit divider,
654 * we need to find an escape clock frequency within the
 655 * MIPI DSI spec range, subject to the maximum divider limit.
 656 * We iterate here over escape clock frequencies from
 657 * 20 MHz down to 5 MHz and pick the first one that can
 658 * be supported by our divider
659 */
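		/*
		 * Illustrative example (not from the original source): with a
		 * 54 MHz byte clock the first candidate is esc_mhz = 20,
		 * giving esc_div = DIV_ROUND_UP(54, 20) = 3 and an escape
		 * clock of 54 / 3 = 18 MHz.
		 */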
660
661 byte_mhz = msm_host->byte_clk_rate / 1000000;
662
663 for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
664 esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
665
666 /*
667 * TODO: Ideally, we shouldn't know what sort of divider
668 * is available in mmss_cc, we're just assuming that
669 * it'll always be a 4 bit divider. Need to come up with
670 * a better way here.
671 */
672 if (esc_div >= 1 && esc_div <= 16)
673 break;
674 }
675
676 if (esc_mhz < 5)
677 return -EINVAL;
678
679 msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
680
681 DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
682 msm_host->src_clk_rate);
683 }
684
685 return 0;
686}
687
688static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
689{
690 DBG("");
691 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
692 /* Make sure fully reset */
693 wmb();
694 udelay(1000);
695 dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
696 udelay(100);
697}
698
699static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
700{
701 u32 intr;
702 unsigned long flags;
703
704 spin_lock_irqsave(&msm_host->intr_lock, flags);
705 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
706
707 if (enable)
708 intr |= mask;
709 else
710 intr &= ~mask;
711
712 DBG("intr=%x enable=%d", intr, enable);
713
714 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
715 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
716}
717
718static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
719{
720 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
721 return BURST_MODE;
722 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
723 return NON_BURST_SYNCH_PULSE;
724
725 return NON_BURST_SYNCH_EVENT;
726}
727
728static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
729 const enum mipi_dsi_pixel_format mipi_fmt)
730{
731 switch (mipi_fmt) {
732 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888;
733 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE;
734 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
735 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565;
736 default: return VID_DST_FORMAT_RGB888;
737 }
738}
739
740static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
741 const enum mipi_dsi_pixel_format mipi_fmt)
742{
743 switch (mipi_fmt) {
744 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
745 case MIPI_DSI_FMT_RGB666_PACKED:
746 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
747 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
748 default: return CMD_DST_FORMAT_RGB888;
749 }
750}
751
752static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
753 u32 clk_pre, u32 clk_post)
754{
755 u32 flags = msm_host->mode_flags;
756 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
 757 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
758 u32 data = 0;
759
760 if (!enable) {
761 dsi_write(msm_host, REG_DSI_CTRL, 0);
762 return;
763 }
764
765 if (flags & MIPI_DSI_MODE_VIDEO) {
766 if (flags & MIPI_DSI_MODE_VIDEO_HSE)
767 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
768 if (flags & MIPI_DSI_MODE_VIDEO_HFP)
769 data |= DSI_VID_CFG0_HFP_POWER_STOP;
770 if (flags & MIPI_DSI_MODE_VIDEO_HBP)
771 data |= DSI_VID_CFG0_HBP_POWER_STOP;
772 if (flags & MIPI_DSI_MODE_VIDEO_HSA)
773 data |= DSI_VID_CFG0_HSA_POWER_STOP;
774 /* Always set low power stop mode for BLLP
775 * to let command engine send packets
776 */
777 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
778 DSI_VID_CFG0_BLLP_POWER_STOP;
779 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
780 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
781 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
782 dsi_write(msm_host, REG_DSI_VID_CFG0, data);
783
784 /* Do not swap RGB colors */
785 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
786 dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
787 } else {
788 /* Do not swap RGB colors */
789 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
790 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
791 dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
792
793 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
794 DSI_CMD_CFG1_WR_MEM_CONTINUE(
795 MIPI_DCS_WRITE_MEMORY_CONTINUE);
796 /* Always insert DCS command */
797 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
798 dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
799 }
800
801 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
802 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
803 DSI_CMD_DMA_CTRL_LOW_POWER);
804
805 data = 0;
806 /* Always assume dedicated TE pin */
807 data |= DSI_TRIG_CTRL_TE;
808 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
809 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
810 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
811 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
812 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
813 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
814 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
815
816 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
817 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
818 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
819
820 data = 0;
821 if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
822 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
823 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
824
825 /* allow only ack-err-status to generate interrupt */
826 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
827
828 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
829
830 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
831
832 data = DSI_CTRL_CLK_EN;
833
834 DBG("lane number=%d", msm_host->lanes);
835 if (msm_host->lanes == 2) {
836 data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
837 /* swap lanes for 2-lane panel for better performance */
838 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
839 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
840 } else {
841 /* Take 4 lanes as default */
842 data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
843 DSI_CTRL_LANE3;
844 /* Do not swap lanes for 4-lane panel */
845 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
846 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
847 }
848
849 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
850 dsi_write(msm_host, REG_DSI_LANE_CTRL,
851 DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
852
853 data |= DSI_CTRL_ENABLE;
854
855 dsi_write(msm_host, REG_DSI_CTRL, data);
856}
857
858static void dsi_timing_setup(struct msm_dsi_host *msm_host)
859{
860 struct drm_display_mode *mode = msm_host->mode;
861 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
862 u32 h_total = mode->htotal;
863 u32 v_total = mode->vtotal;
864 u32 hs_end = mode->hsync_end - mode->hsync_start;
865 u32 vs_end = mode->vsync_end - mode->vsync_start;
866 u32 ha_start = h_total - mode->hsync_start;
867 u32 ha_end = ha_start + mode->hdisplay;
868 u32 va_start = v_total - mode->vsync_start;
869 u32 va_end = va_start + mode->vdisplay;
870 u32 wc;
871
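	/*
	 * Illustrative example (not from the original source): a mode with
	 * hdisplay = 1080, hsync_start = 1130, hsync_end = 1140 and
	 * htotal = 1180 gives hs_end = 10, ha_start = 50 and ha_end = 1130;
	 * the vertical values are derived the same way.
	 */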
872 DBG("");
873
874 if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
875 dsi_write(msm_host, REG_DSI_ACTIVE_H,
876 DSI_ACTIVE_H_START(ha_start) |
877 DSI_ACTIVE_H_END(ha_end));
878 dsi_write(msm_host, REG_DSI_ACTIVE_V,
879 DSI_ACTIVE_V_START(va_start) |
880 DSI_ACTIVE_V_END(va_end));
881 dsi_write(msm_host, REG_DSI_TOTAL,
882 DSI_TOTAL_H_TOTAL(h_total - 1) |
883 DSI_TOTAL_V_TOTAL(v_total - 1));
884
885 dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
886 DSI_ACTIVE_HSYNC_START(hs_start) |
887 DSI_ACTIVE_HSYNC_END(hs_end));
888 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
889 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
890 DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
891 DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
892 } else { /* command mode */
893 /* image data and 1 byte write_memory_start cmd */
894 wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
895
896 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
897 DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
898 DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
899 msm_host->channel) |
900 DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
901 MIPI_DSI_DCS_LONG_WRITE));
902
903 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
904 DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
905 DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
906 }
907}
908
909static void dsi_sw_reset(struct msm_dsi_host *msm_host)
910{
911 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
912 wmb(); /* clocks need to be enabled before reset */
913
914 dsi_write(msm_host, REG_DSI_RESET, 1);
915 wmb(); /* make sure reset happen */
916 dsi_write(msm_host, REG_DSI_RESET, 0);
917}
918
919static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
920 bool video_mode, bool enable)
921{
922 u32 dsi_ctrl;
923
924 dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
925
926 if (!enable) {
927 dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
928 DSI_CTRL_CMD_MODE_EN);
929 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
930 DSI_IRQ_MASK_VIDEO_DONE, 0);
931 } else {
932 if (video_mode) {
933 dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
934 } else { /* command mode */
935 dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
936 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
937 }
938 dsi_ctrl |= DSI_CTRL_ENABLE;
939 }
940
941 dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
942}
943
944static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
945{
946 u32 data;
947
948 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
949
950 if (mode == 0)
951 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
952 else
953 data |= DSI_CMD_DMA_CTRL_LOW_POWER;
954
955 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
956}
957
958static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
959{
960 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
961
962 reinit_completion(&msm_host->video_comp);
963
964 wait_for_completion_timeout(&msm_host->video_comp,
965 msecs_to_jiffies(70));
966
967 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
968}
969
970static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
971{
972 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
973 return;
974
975 if (msm_host->power_on) {
976 dsi_wait4video_done(msm_host);
977 /* delay 4 ms to skip BLLP */
978 usleep_range(2000, 4000);
979 }
980}
981
982/* dsi_cmd */
983static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
984{
985 struct drm_device *dev = msm_host->dev;
986 int ret;
987 u32 iova;
988
989 mutex_lock(&dev->struct_mutex);
990 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
991 if (IS_ERR(msm_host->tx_gem_obj)) {
992 ret = PTR_ERR(msm_host->tx_gem_obj);
993 pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
994 msm_host->tx_gem_obj = NULL;
995 mutex_unlock(&dev->struct_mutex);
996 return ret;
997 }
998
999 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
1000 if (ret) {
1001 pr_err("%s: failed to get iova, %d\n", __func__, ret);
1002 return ret;
1003 }
1004 mutex_unlock(&dev->struct_mutex);
1005
1006 if (iova & 0x07) {
1007 pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
1008 return -EINVAL;
1009 }
1010
1011 return 0;
1012}
1013
1014static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1015{
1016 struct drm_device *dev = msm_host->dev;
1017
1018 if (msm_host->tx_gem_obj) {
1019 msm_gem_put_iova(msm_host->tx_gem_obj, 0);
1020 mutex_lock(&dev->struct_mutex);
1021 msm_gem_free_object(msm_host->tx_gem_obj);
1022 msm_host->tx_gem_obj = NULL;
1023 mutex_unlock(&dev->struct_mutex);
1024 }
1025}
1026
1027/*
1028 * prepare cmd buffer to be txed
1029 */
1030static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
1031 const struct mipi_dsi_msg *msg)
1032{
1033 struct mipi_dsi_packet packet;
1034 int len;
1035 int ret;
1036 u8 *data;
1037
1038 ret = mipi_dsi_create_packet(&packet, msg);
1039 if (ret) {
1040 pr_err("%s: create packet failed, %d\n", __func__, ret);
1041 return ret;
1042 }
1043 len = (packet.size + 3) & (~0x3);
1044
1045 if (len > tx_gem->size) {
1046 pr_err("%s: packet size is too big\n", __func__);
1047 return -EINVAL;
1048 }
1049
1050 data = msm_gem_vaddr(tx_gem);
1051
1052 if (IS_ERR(data)) {
1053 ret = PTR_ERR(data);
1054 pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1055 return ret;
1056 }
1057
1058 /* MSM specific command format in memory */
1059 data[0] = packet.header[1];
1060 data[1] = packet.header[2];
1061 data[2] = packet.header[0];
1062 data[3] = BIT(7); /* Last packet */
1063 if (mipi_dsi_packet_format_is_long(msg->type))
1064 data[3] |= BIT(6);
1065 if (msg->rx_buf && msg->rx_len)
1066 data[3] |= BIT(5);
1067
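	/*
	 * Note (added for clarity): data[3] carries MSM-specific flags --
	 * BIT(7) marks the last packet in the buffer, BIT(6) a long packet,
	 * and BIT(5) is set when a response is expected from the peripheral.
	 */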
1068 /* Long packet */
1069 if (packet.payload && packet.payload_length)
1070 memcpy(data + 4, packet.payload, packet.payload_length);
1071
1072 /* Append 0xff to the end */
1073 if (packet.size < len)
1074 memset(data + packet.size, 0xff, len - packet.size);
1075
1076 return len;
1077}
1078
1079/*
1080 * dsi_short_read1_resp: 1 parameter
1081 */
1082static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1083{
1084 u8 *data = msg->rx_buf;
1085 if (data && (msg->rx_len >= 1)) {
1086 *data = buf[1]; /* strip out dcs type */
1087 return 1;
1088 } else {
 1089 pr_err("%s: read data does not match with rx_buf len %zu\n",
1090 __func__, msg->rx_len);
1091 return -EINVAL;
1092 }
1093}
1094
1095/*
1096 * dsi_short_read2_resp: 2 parameter
1097 */
1098static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1099{
1100 u8 *data = msg->rx_buf;
1101 if (data && (msg->rx_len >= 2)) {
1102 data[0] = buf[1]; /* strip out dcs type */
1103 data[1] = buf[2];
1104 return 2;
1105 } else {
 1106 pr_err("%s: read data does not match with rx_buf len %zu\n",
1107 __func__, msg->rx_len);
1108 return -EINVAL;
1109 }
1110}
1111
1112static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1113{
1114 /* strip out 4 byte dcs header */
1115 if (msg->rx_buf && msg->rx_len)
1116 memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1117
1118 return msg->rx_len;
1119}
1120
1121
1122static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1123{
1124 int ret;
1125 u32 iova;
1126 bool triggered;
1127
1128 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
1129 if (ret) {
1130 pr_err("%s: failed to get iova: %d\n", __func__, ret);
1131 return ret;
1132 }
1133
1134 reinit_completion(&msm_host->dma_comp);
1135
1136 dsi_wait4video_eng_busy(msm_host);
1137
1138 triggered = msm_dsi_manager_cmd_xfer_trigger(
1139 msm_host->id, iova, len);
1140 if (triggered) {
1141 ret = wait_for_completion_timeout(&msm_host->dma_comp,
1142 msecs_to_jiffies(200));
1143 DBG("ret=%d", ret);
1144 if (ret == 0)
1145 ret = -ETIMEDOUT;
1146 else
1147 ret = len;
1148 } else
1149 ret = len;
1150
1151 return ret;
1152}
1153
1154static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1155 u8 *buf, int rx_byte, int pkt_size)
1156{
1157 u32 *lp, *temp, data;
1158 int i, j = 0, cnt;
1159 u32 read_cnt;
1160 u8 reg[16];
1161 int repeated_bytes = 0;
1162 int buf_offset = buf - msm_host->rx_buf;
1163
1164 lp = (u32 *)buf;
1165 temp = (u32 *)reg;
1166 cnt = (rx_byte + 3) >> 2;
1167 if (cnt > 4)
1168 cnt = 4; /* 4 x 32 bits registers only */
1169
1170 if (rx_byte == 4)
1171 read_cnt = 4;
1172 else
1173 read_cnt = pkt_size + 6;
1174
1175 /*
1176 * In case of multiple reads from the panel, after the first read, there
 1177 * is a possibility that some bytes in the payload repeat in
 1178 * the RDBK_DATA registers. Since we read all the parameters from the
 1179 * panel right from the first byte for every pass, we need to skip the
1180 * repeating bytes and then append the new parameters to the rx buffer.
1181 */
1182 if (read_cnt > 16) {
1183 int bytes_shifted;
1184 /* Any data more than 16 bytes will be shifted out.
1185 * The temp read buffer should already contain these bytes.
1186 * The remaining bytes in read buffer are the repeated bytes.
1187 */
1188 bytes_shifted = read_cnt - 16;
1189 repeated_bytes = buf_offset - bytes_shifted;
1190 }
1191
1192 for (i = cnt - 1; i >= 0; i--) {
1193 data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1194 *temp++ = ntohl(data); /* to host byte order */
1195 DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1196 }
1197
1198 for (i = repeated_bytes; i < 16; i++)
1199 buf[j++] = reg[i];
1200
1201 return j;
1202}
1203
1204static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1205 const struct mipi_dsi_msg *msg)
1206{
1207 int len, ret;
1208 int bllp_len = msm_host->mode->hdisplay *
1209 dsi_get_bpp(msm_host->format) / 8;
1210
1211 len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
1212 if (!len) {
1213 pr_err("%s: failed to add cmd type = 0x%x\n",
1214 __func__, msg->type);
1215 return -EINVAL;
1216 }
1217
1218 /* for video mode, do not send cmds more than
 1219 * one pixel line, since they are only transmitted
1220 * during BLLP.
1221 */
1222 /* TODO: if the command is sent in LP mode, the bit rate is only
1223 * half of esc clk rate. In this case, if the video is already
1224 * actively streaming, we need to check more carefully if the
1225 * command can be fit into one BLLP.
1226 */
1227 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1228 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1229 __func__, len);
1230 return -EINVAL;
1231 }
1232
1233 ret = dsi_cmd_dma_tx(msm_host, len);
1234 if (ret < len) {
1235 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1236 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1237 return -ECOMM;
1238 }
1239
1240 return len;
1241}
1242
1243static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1244{
1245 u32 data0, data1;
1246
1247 data0 = dsi_read(msm_host, REG_DSI_CTRL);
1248 data1 = data0;
1249 data1 &= ~DSI_CTRL_ENABLE;
1250 dsi_write(msm_host, REG_DSI_CTRL, data1);
1251 /*
1252 * dsi controller need to be disabled before
1253 * clocks turned on
1254 */
1255 wmb();
1256
1257 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1258 wmb(); /* make sure clocks enabled */
1259
1260 /* dsi controller can only be reset while clocks are running */
1261 dsi_write(msm_host, REG_DSI_RESET, 1);
1262 wmb(); /* make sure reset happen */
1263 dsi_write(msm_host, REG_DSI_RESET, 0);
1264 wmb(); /* controller out of reset */
1265 dsi_write(msm_host, REG_DSI_CTRL, data0);
1266 wmb(); /* make sure dsi controller enabled again */
1267}
1268
1269static void dsi_err_worker(struct work_struct *work)
1270{
1271 struct msm_dsi_host *msm_host =
1272 container_of(work, struct msm_dsi_host, err_work);
1273 u32 status = msm_host->err_work_state;
1274
 1275 pr_err_ratelimited("%s: status=%x\n", __func__, status);
1276 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1277 dsi_sw_reset_restore(msm_host);
1278
1279 /* It is safe to clear here because error irq is disabled. */
1280 msm_host->err_work_state = 0;
1281
1282 /* enable dsi error interrupt */
1283 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1284}
1285
1286static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1287{
1288 u32 status;
1289
1290 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1291
1292 if (status) {
1293 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1294 /* Writing of an extra 0 needed to clear error bits */
1295 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1296 msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1297 }
1298}
1299
1300static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1301{
1302 u32 status;
1303
1304 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1305
1306 if (status) {
1307 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1308 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1309 }
1310}
1311
1312static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1313{
1314 u32 status;
1315
1316 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1317
1318 if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1319 DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1320 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1321 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1322 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1323 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1324 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1325 }
1326}
1327
1328static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1329{
1330 u32 status;
1331
1332 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1333
1334 /* fifo underflow, overflow */
1335 if (status) {
1336 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1337 msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1338 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1339 msm_host->err_work_state |=
1340 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1341 }
1342}
1343
1344static void dsi_status(struct msm_dsi_host *msm_host)
1345{
1346 u32 status;
1347
1348 status = dsi_read(msm_host, REG_DSI_STATUS0);
1349
1350 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1351 dsi_write(msm_host, REG_DSI_STATUS0, status);
1352 msm_host->err_work_state |=
1353 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1354 }
1355}
1356
1357static void dsi_clk_status(struct msm_dsi_host *msm_host)
1358{
1359 u32 status;
1360
1361 status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1362
1363 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1364 dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1365 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1366 }
1367}
1368
1369static void dsi_error(struct msm_dsi_host *msm_host)
1370{
1371 /* disable dsi error interrupt */
1372 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1373
1374 dsi_clk_status(msm_host);
1375 dsi_fifo_status(msm_host);
1376 dsi_ack_err_status(msm_host);
1377 dsi_timeout_status(msm_host);
1378 dsi_status(msm_host);
1379 dsi_dln0_phy_err(msm_host);
1380
1381 queue_work(msm_host->workqueue, &msm_host->err_work);
1382}
1383
1384static irqreturn_t dsi_host_irq(int irq, void *ptr)
1385{
1386 struct msm_dsi_host *msm_host = ptr;
1387 u32 isr;
1388 unsigned long flags;
1389
1390 if (!msm_host->ctrl_base)
1391 return IRQ_HANDLED;
1392
1393 spin_lock_irqsave(&msm_host->intr_lock, flags);
1394 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1395 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1396 spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1397
1398 DBG("isr=0x%x, id=%d", isr, msm_host->id);
1399
1400 if (isr & DSI_IRQ_ERROR)
1401 dsi_error(msm_host);
1402
1403 if (isr & DSI_IRQ_VIDEO_DONE)
1404 complete(&msm_host->video_comp);
1405
1406 if (isr & DSI_IRQ_CMD_DMA_DONE)
1407 complete(&msm_host->dma_comp);
1408
1409 return IRQ_HANDLED;
1410}
1411
1412static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1413 struct device *panel_device)
1414{
1415 msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1416 "disp-enable",
1417 GPIOD_OUT_LOW);
1418 if (IS_ERR(msm_host->disp_en_gpio)) {
1419 DBG("cannot get disp-enable-gpios %ld",
1420 PTR_ERR(msm_host->disp_en_gpio));
 1421 return PTR_ERR(msm_host->disp_en_gpio);
1422 }
1423
1424 msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1425 GPIOD_IN);
1426 if (IS_ERR(msm_host->te_gpio)) {
1427 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
 1428 return PTR_ERR(msm_host->te_gpio);
1429 }
1430
1431 return 0;
1432}
1433
1434static int dsi_host_attach(struct mipi_dsi_host *host,
1435 struct mipi_dsi_device *dsi)
1436{
1437 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1438 int ret;
1439
1440 msm_host->channel = dsi->channel;
1441 msm_host->lanes = dsi->lanes;
1442 msm_host->format = dsi->format;
1443 msm_host->mode_flags = dsi->mode_flags;
1444
 1445 WARN_ON(dsi->dev.of_node != msm_host->device_node);
1446
1447 /* Some gpios defined in panel DT need to be controlled by host */
1448 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1449 if (ret)
1450 return ret;
1451
1452 DBG("id=%d", msm_host->id);
1453 if (msm_host->dev)
1454 drm_helper_hpd_irq_event(msm_host->dev);
1455
1456 return 0;
1457}
1458
1459static int dsi_host_detach(struct mipi_dsi_host *host,
1460 struct mipi_dsi_device *dsi)
1461{
1462 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1463
 1464 msm_host->device_node = NULL;
1465
1466 DBG("id=%d", msm_host->id);
1467 if (msm_host->dev)
1468 drm_helper_hpd_irq_event(msm_host->dev);
1469
1470 return 0;
1471}
1472
1473static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1474 const struct mipi_dsi_msg *msg)
1475{
1476 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1477 int ret;
1478
1479 if (!msg || !msm_host->power_on)
1480 return -EINVAL;
1481
1482 mutex_lock(&msm_host->cmd_mutex);
1483 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1484 mutex_unlock(&msm_host->cmd_mutex);
1485
1486 return ret;
1487}
1488
1489static struct mipi_dsi_host_ops dsi_host_ops = {
1490 .attach = dsi_host_attach,
1491 .detach = dsi_host_detach,
1492 .transfer = dsi_host_transfer,
1493};
1494
1495static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1496{
1497 struct device *dev = &msm_host->pdev->dev;
1498 struct device_node *np = dev->of_node;
 1499 struct device_node *endpoint, *device_node;
1500 int ret;
1501
1502 ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
1503 if (ret) {
1504 dev_err(dev, "%s: host index not specified, ret=%d\n",
1505 __func__, ret);
1506 return ret;
1507 }
1508
1509 /*
1510 * Get the first endpoint node. In our case, dsi has one output port
1511 * to which the panel is connected. Don't return an error if a port
1512 * isn't defined. It's possible that there is nothing connected to
1513 * the dsi output.
1514 */
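	/*
	 * Illustrative DT fragment (not from this file) of the layout this
	 * walk expects:
	 *
	 *	dsi@... {
	 *		port {
	 *			dsi_out: endpoint {
	 *				remote-endpoint = <&panel_in>;
	 *			};
	 *		};
	 *	};
	 */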
1515 endpoint = of_graph_get_next_endpoint(np, NULL);
1516 if (!endpoint) {
1517 dev_dbg(dev, "%s: no endpoint\n", __func__);
1518 return 0;
1519 }
1520
1521 /* Get panel node from the output port's endpoint data */
1522 device_node = of_graph_get_remote_port_parent(endpoint);
1523 if (!device_node) {
1524 dev_err(dev, "%s: no valid device\n", __func__);
1525 of_node_put(endpoint);
1526 return -ENODEV;
1527 }
1528
1529 of_node_put(endpoint);
 1530 of_node_put(device_node);
 1531
 1532 msm_host->device_node = device_node;
1533
1534 return 0;
1535}
1536
1537int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1538{
1539 struct msm_dsi_host *msm_host = NULL;
1540 struct platform_device *pdev = msm_dsi->pdev;
1541 int ret;
1542
1543 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1544 if (!msm_host) {
1545 pr_err("%s: FAILED: cannot alloc dsi host\n",
1546 __func__);
1547 ret = -ENOMEM;
1548 goto fail;
1549 }
1550
1551 msm_host->pdev = pdev;
1552
1553 ret = dsi_host_parse_dt(msm_host);
 1554 if (ret) {
 1555 pr_err("%s: failed to parse dt\n", __func__);
1556 goto fail;
1557 }
 1558
1559 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1560 if (IS_ERR(msm_host->ctrl_base)) {
1561 pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1562 ret = PTR_ERR(msm_host->ctrl_base);
1563 goto fail;
1564 }
1565
1566 msm_host->cfg_hnd = dsi_get_config(msm_host);
1567 if (!msm_host->cfg_hnd) {
1568 ret = -EINVAL;
1569 pr_err("%s: get config failed\n", __func__);
1570 goto fail;
1571 }
1572
1573 /* fixup base address by io offset */
1574 msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1575
1576 ret = dsi_regulator_init(msm_host);
1577 if (ret) {
1578 pr_err("%s: regulator init failed\n", __func__);
1579 goto fail;
1580 }
1581
1582 ret = dsi_clk_init(msm_host);
1583 if (ret) {
1584 pr_err("%s: unable to initialize dsi clks\n", __func__);
1585 goto fail;
1586 }
1587
1588 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1589 if (!msm_host->rx_buf) {
1590 pr_err("%s: alloc rx temp buf failed\n", __func__);
1591 goto fail;
1592 }
1593
1594 init_completion(&msm_host->dma_comp);
1595 init_completion(&msm_host->video_comp);
1596 mutex_init(&msm_host->dev_mutex);
1597 mutex_init(&msm_host->cmd_mutex);
1598 mutex_init(&msm_host->clk_mutex);
1599 spin_lock_init(&msm_host->intr_lock);
1600
1601 /* setup workqueue */
1602 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1603 INIT_WORK(&msm_host->err_work, dsi_err_worker);
1604
1605 msm_dsi->host = &msm_host->base;
1606 msm_dsi->id = msm_host->id;
1607
1608 DBG("Dsi Host %d initialized", msm_host->id);
1609 return 0;
1610
1611fail:
1612 return ret;
1613}
1614
1615void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1616{
1617 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1618
1619 DBG("");
1620 dsi_tx_buf_free(msm_host);
1621 if (msm_host->workqueue) {
1622 flush_workqueue(msm_host->workqueue);
1623 destroy_workqueue(msm_host->workqueue);
1624 msm_host->workqueue = NULL;
1625 }
1626
1627 mutex_destroy(&msm_host->clk_mutex);
1628 mutex_destroy(&msm_host->cmd_mutex);
1629 mutex_destroy(&msm_host->dev_mutex);
1630}
1631
1632int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1633 struct drm_device *dev)
1634{
1635 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1636 struct platform_device *pdev = msm_host->pdev;
1637 int ret;
1638
1639 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1640 if (msm_host->irq < 0) {
1641 ret = msm_host->irq;
1642 dev_err(dev->dev, "failed to get irq: %d\n", ret);
1643 return ret;
1644 }
1645
1646 ret = devm_request_irq(&pdev->dev, msm_host->irq,
1647 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1648 "dsi_isr", msm_host);
1649 if (ret < 0) {
1650 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1651 msm_host->irq, ret);
1652 return ret;
1653 }
1654
1655 msm_host->dev = dev;
1656 ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1657 if (ret) {
1658 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1659 return ret;
1660 }
1661
1662 return 0;
1663}
1664
1665int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1666{
1667 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1668 int ret;
1669
1670 /* Register mipi dsi host */
1671 if (!msm_host->registered) {
1672 host->dev = &msm_host->pdev->dev;
1673 host->ops = &dsi_host_ops;
1674 ret = mipi_dsi_host_register(host);
1675 if (ret)
1676 return ret;
1677
1678 msm_host->registered = true;
1679
1680 /* If the panel driver has not been probed after host register,
1681 * we should defer the host's probe.
1682 * It makes sure panel is connected when fbcon detects
1683 * connector status and gets the proper display mode to
1684 * create framebuffer.
1685 * Don't try to defer if there is nothing connected to the dsi
1686 * output
 1687 */
1688 if (check_defer && msm_host->device_node) {
1689 if (!of_drm_find_panel(msm_host->device_node))
1690 if (!of_drm_find_bridge(msm_host->device_node))
1691 return -EPROBE_DEFER;
1692 }
1693 }
1694
1695 return 0;
1696}
1697
1698void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1699{
1700 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1701
1702 if (msm_host->registered) {
1703 mipi_dsi_host_unregister(host);
1704 host->dev = NULL;
1705 host->ops = NULL;
1706 msm_host->registered = false;
1707 }
1708}
1709
1710int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1711 const struct mipi_dsi_msg *msg)
1712{
1713 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1714
1715 /* TODO: make sure dsi_cmd_mdp is idle.
1716 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1717 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
1718 * How to handle the old versions? Wait for mdp cmd done?
1719 */
1720
1721 /*
1722 * mdss interrupt is generated in mdp core clock domain
1723 * mdp clock need to be enabled to receive dsi interrupt
1724 */
1725 dsi_clk_ctrl(msm_host, 1);
1726
1727 /* TODO: vote for bus bandwidth */
1728
1729 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1730 dsi_set_tx_power_mode(0, msm_host);
1731
1732 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
1733 dsi_write(msm_host, REG_DSI_CTRL,
1734 msm_host->dma_cmd_ctrl_restore |
1735 DSI_CTRL_CMD_MODE_EN |
1736 DSI_CTRL_ENABLE);
1737 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
1738
1739 return 0;
1740}
1741
1742void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
1743 const struct mipi_dsi_msg *msg)
1744{
1745 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1746
1747 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
1748 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
1749
1750 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1751 dsi_set_tx_power_mode(1, msm_host);
1752
1753 /* TODO: unvote for bus bandwidth */
1754
1755 dsi_clk_ctrl(msm_host, 0);
1756}
1757
1758int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
1759 const struct mipi_dsi_msg *msg)
1760{
1761 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1762
1763 return dsi_cmds2buf_tx(msm_host, msg);
1764}
1765
1766int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1767 const struct mipi_dsi_msg *msg)
1768{
1769 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 1770 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1771 int data_byte, rx_byte, dlen, end;
1772 int short_response, diff, pkt_size, ret = 0;
1773 char cmd;
1774 int rlen = msg->rx_len;
1775 u8 *buf;
1776
1777 if (rlen <= 2) {
1778 short_response = 1;
1779 pkt_size = rlen;
1780 rx_byte = 4;
1781 } else {
1782 short_response = 0;
1783 data_byte = 10; /* first read */
1784 if (rlen < data_byte)
1785 pkt_size = rlen;
1786 else
1787 pkt_size = data_byte;
1788 rx_byte = data_byte + 6; /* 4 header + 2 crc */
1789 }
1790
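	/*
	 * Note (added for clarity): reads longer than the 16-byte RDBK FIFO
	 * are fetched in several passes; the max return packet size grows by
	 * up to 14 bytes per pass and dsi_cmd_dma_rx() drops the bytes that
	 * repeat from the previous pass.
	 */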
1791 buf = msm_host->rx_buf;
1792 end = 0;
1793 while (!end) {
1794 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1795 struct mipi_dsi_msg max_pkt_size_msg = {
1796 .channel = msg->channel,
1797 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1798 .tx_len = 2,
1799 .tx_buf = tx,
1800 };
1801
1802 DBG("rlen=%d pkt_size=%d rx_byte=%d",
1803 rlen, pkt_size, rx_byte);
1804
1805 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1806 if (ret < 2) {
1807 pr_err("%s: Set max pkt size failed, %d\n",
1808 __func__, ret);
1809 return -EINVAL;
1810 }
1811
1812 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
1813 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1814 /* Clear the RDBK_DATA registers */
1815 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1816 DSI_RDBK_DATA_CTRL_CLR);
1817 wmb(); /* make sure the RDBK registers are cleared */
1818 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1819 wmb(); /* release cleared status before transfer */
1820 }
1821
1822 ret = dsi_cmds2buf_tx(msm_host, msg);
1823 if (ret < msg->tx_len) {
1824 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1825 return ret;
1826 }
1827
1828 /*
1829 * once cmd_dma_done interrupt received,
1830 * return data from client is ready and stored
1831 * at RDBK_DATA register already
1832 * since rx fifo is 16 bytes, dcs header is kept at first loop,
 1833 * after that the dcs header is lost during the shift into the registers
1834 */
1835 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1836
1837 if (dlen <= 0)
1838 return 0;
1839
1840 if (short_response)
1841 break;
1842
1843 if (rlen <= data_byte) {
1844 diff = data_byte - rlen;
1845 end = 1;
1846 } else {
1847 diff = 0;
1848 rlen -= data_byte;
1849 }
1850
1851 if (!end) {
1852 dlen -= 2; /* 2 crc */
1853 dlen -= diff;
1854 buf += dlen; /* next start position */
1855 data_byte = 14; /* NOT first read */
1856 if (rlen < data_byte)
1857 pkt_size += rlen;
1858 else
1859 pkt_size += data_byte;
1860 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
1861 }
1862 }
1863
1864 /*
1865 * For single Long read, if the requested rlen < 10,
1866 * we need to shift the start position of rx
1867 * data buffer to skip the bytes which are not
1868 * updated.
1869 */
1870 if (pkt_size < 10 && !short_response)
1871 buf = msm_host->rx_buf + (10 - rlen);
1872 else
1873 buf = msm_host->rx_buf;
1874
1875 cmd = buf[0];
1876 switch (cmd) {
1877 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
 1878 pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
1879 ret = 0;
 1880 break;
1881 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
1882 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
1883 ret = dsi_short_read1_resp(buf, msg);
1884 break;
1885 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
1886 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
1887 ret = dsi_short_read2_resp(buf, msg);
1888 break;
1889 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
1890 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
1891 ret = dsi_long_read_resp(buf, msg);
1892 break;
1893 default:
1894 pr_warn("%s:Invalid response cmd\n", __func__);
1895 ret = 0;
1896 }
1897
1898 return ret;
1899}
1900
1901void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
1902{
1903 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1904
1905 dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
1906 dsi_write(msm_host, REG_DSI_DMA_LEN, len);
1907 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
1908
1909 /* Make sure trigger happens */
1910 wmb();
1911}
1912
1913int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
1914 struct msm_dsi_pll *src_pll)
1915{
1916 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 1917 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1918 struct clk *byte_clk_provider, *pixel_clk_provider;
1919 int ret;
1920
1921 ret = msm_dsi_pll_get_clk_provider(src_pll,
1922 &byte_clk_provider, &pixel_clk_provider);
1923 if (ret) {
1924 pr_info("%s: can't get provider from pll, don't set parent\n",
1925 __func__);
1926 return 0;
1927 }
1928
1929 ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
1930 if (ret) {
1931 pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
1932 __func__, ret);
1933 goto exit;
1934 }
1935
1936 ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
1937 if (ret) {
1938 pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
1939 __func__, ret);
1940 goto exit;
1941 }
1942
1943 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
1944 ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
1945 if (ret) {
1946 pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
1947 __func__, ret);
1948 goto exit;
1949 }
1950
1951 ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
1952 if (ret) {
1953 pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
1954 __func__, ret);
1955 goto exit;
1956 }
1957 }
1958
1959exit:
1960 return ret;
1961}
1962
1963int msm_dsi_host_enable(struct mipi_dsi_host *host)
1964{
1965 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1966
1967 dsi_op_mode_config(msm_host,
1968 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
1969
1970 /* TODO: clock should be turned off for command mode,
1971 * and only turned on before MDP START.
1972 * This part of code should be enabled once mdp driver support it.
1973 */
1974 /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
1975 dsi_clk_ctrl(msm_host, 0); */
1976
1977 return 0;
1978}
1979
1980int msm_dsi_host_disable(struct mipi_dsi_host *host)
1981{
1982 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1983
1984 dsi_op_mode_config(msm_host,
1985 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
1986
1987 /* Since we have disabled INTF, the video engine won't stop so that
1988 * the cmd engine will be blocked.
1989 * Reset to disable video engine so that we can send off cmd.
1990 */
1991 dsi_sw_reset(msm_host);
1992
1993 return 0;
1994}
1995
1996int msm_dsi_host_power_on(struct mipi_dsi_host *host)
1997{
1998 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1999 u32 clk_pre = 0, clk_post = 0;
2000 int ret = 0;
2001
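	/*
	 * Note (added for clarity): the power-on order below is regulators,
	 * a temporary bus-clock window for the PHY reset/enable, then the
	 * full bus and link clocks, pinctrl, timing setup and controller
	 * configuration.
	 */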
2002 mutex_lock(&msm_host->dev_mutex);
2003 if (msm_host->power_on) {
2004 DBG("dsi host already on");
2005 goto unlock_ret;
2006 }
2007
2008 ret = dsi_calc_clk_rate(msm_host);
2009 if (ret) {
2010 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2011 goto unlock_ret;
2012 }
2013
2014 ret = dsi_host_regulator_enable(msm_host);
2015 if (ret) {
2016 pr_err("%s:Failed to enable vregs.ret=%d\n",
2017 __func__, ret);
2018 goto unlock_ret;
2019 }
2020
2021 ret = dsi_bus_clk_enable(msm_host);
2022 if (ret) {
2023 pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
2024 goto fail_disable_reg;
2025 }
2026
2027 dsi_phy_sw_reset(msm_host);
2028 ret = msm_dsi_manager_phy_enable(msm_host->id,
2029 msm_host->byte_clk_rate * 8,
 2030 msm_host->esc_clk_rate,
2031 &clk_pre, &clk_post);
2032 dsi_bus_clk_disable(msm_host);
2033 if (ret) {
2034 pr_err("%s: failed to enable phy, %d\n", __func__, ret);
2035 goto fail_disable_reg;
2036 }
2037
2038 ret = dsi_clk_ctrl(msm_host, 1);
2039 if (ret) {
2040 pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
2041 goto fail_disable_reg;
2042 }
2043
2044 ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2045 if (ret) {
2046 pr_err("%s: failed to set pinctrl default state, %d\n",
2047 __func__, ret);
2048 goto fail_disable_clk;
2049 }
2050
2051 dsi_timing_setup(msm_host);
2052 dsi_sw_reset(msm_host);
2053 dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
2054
2055 if (msm_host->disp_en_gpio)
2056 gpiod_set_value(msm_host->disp_en_gpio, 1);
2057
2058 msm_host->power_on = true;
2059 mutex_unlock(&msm_host->dev_mutex);
2060
2061 return 0;
2062
2063fail_disable_clk:
2064 dsi_clk_ctrl(msm_host, 0);
2065fail_disable_reg:
2066 dsi_host_regulator_disable(msm_host);
2067unlock_ret:
2068 mutex_unlock(&msm_host->dev_mutex);
2069 return ret;
2070}
2071
2072int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2073{
2074 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2075
2076 mutex_lock(&msm_host->dev_mutex);
2077 if (!msm_host->power_on) {
2078 DBG("dsi host already off");
2079 goto unlock_ret;
2080 }
2081
2082 dsi_ctrl_config(msm_host, false, 0, 0);
2083
2084 if (msm_host->disp_en_gpio)
2085 gpiod_set_value(msm_host->disp_en_gpio, 0);
2086
2087 pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2088
2089 msm_dsi_manager_phy_disable(msm_host->id);
2090
2091 dsi_clk_ctrl(msm_host, 0);
2092
2093 dsi_host_regulator_disable(msm_host);
2094
2095 DBG("-");
2096
2097 msm_host->power_on = false;
2098
2099unlock_ret:
2100 mutex_unlock(&msm_host->dev_mutex);
2101 return 0;
2102}
2103
2104int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2105 struct drm_display_mode *mode)
2106{
2107 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2108
2109 if (msm_host->mode) {
2110 drm_mode_destroy(msm_host->dev, msm_host->mode);
2111 msm_host->mode = NULL;
2112 }
2113
2114 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2115 if (IS_ERR(msm_host->mode)) {
2116 pr_err("%s: cannot duplicate mode\n", __func__);
2117 return PTR_ERR(msm_host->mode);
2118 }
2119
2120 return 0;
2121}
2122
2123struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2124 unsigned long *panel_flags)
2125{
2126 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2127 struct drm_panel *panel;
2128
 2129 panel = of_drm_find_panel(msm_host->device_node);
2130 if (panel_flags)
2131 *panel_flags = msm_host->mode_flags;
2132
2133 return panel;
2134}
2135
2136struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2137{
2138 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2139
2140 return of_drm_find_bridge(msm_host->device_node);
2141}