Commit | Line | Data |
---|---|---|
051fb70f BA |
1 | /* |
2 | * Qualcomm Peripheral Image Loader | |
3 | * | |
4 | * Copyright (C) 2016 Linaro Ltd. | |
5 | * Copyright (C) 2014 Sony Mobile Communications AB | |
6 | * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License | |
10 | * version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | */ | |
17 | ||
18 | #include <linux/clk.h> | |
19 | #include <linux/delay.h> | |
20 | #include <linux/dma-mapping.h> | |
21 | #include <linux/interrupt.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/mfd/syscon.h> | |
24 | #include <linux/module.h> | |
25 | #include <linux/of_address.h> | |
26 | #include <linux/platform_device.h> | |
27 | #include <linux/regmap.h> | |
28 | #include <linux/regulator/consumer.h> | |
29 | #include <linux/remoteproc.h> | |
30 | #include <linux/reset.h> | |
31 | #include <linux/soc/qcom/smem.h> | |
32 | #include <linux/soc/qcom/smem_state.h> | |
33 | ||
34 | #include "remoteproc_internal.h" | |
35 | #include "qcom_mdt_loader.h" | |
36 | ||
37 | #include <linux/qcom_scm.h> | |
38 | ||
/* Firmware files requested from userspace: the MBA image and the modem mdt */
#define MBA_FIRMWARE_NAME		"mba.b00"
#define MPSS_FIRMWARE_NAME		"modem.mdt"

/* SMEM item id holding the modem's crash-reason string */
#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers (offsets into rmb_base) */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

/* Commands written to RMB_MBA_COMMAND_REG */
#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets (offsets into reg_base) */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets (relative to each halt block in halt_map) */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)
95 | ||
/* Driver state for one Hexagon v5 modem subsystem instance */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS control registers */
	void __iomem *rmb_base;		/* PBL/MBA interface registers */

	struct regmap *halt_map;	/* syscon holding the AXI halt blocks */
	u32 halt_q6;			/* per-port halt block offsets */
	u32 halt_modem;			/* within halt_map */
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;	/* outbound "stop" smem state */
	unsigned stop_bit;		/* bit toggled in that state */

	struct regulator_bulk_data supply[4];	/* indexed by Q6V5_SUPPLY_* */

	struct clk *ahb_clk;		/* "iface" clock */
	struct clk *axi_clk;		/* "bus" clock */
	struct clk *rom_clk;		/* "mem" clock */

	struct completion start_done;	/* completed by handover interrupt */
	struct completion stop_done;	/* completed by stop-ack (or wdog) */
	bool running;			/* set once q6v5_start() succeeds */

	phys_addr_t mba_phys;		/* carveout holding the MBA image */
	void *mba_region;		/* WC mapping of that carveout */
	size_t mba_size;

	phys_addr_t mpss_phys;		/* carveout holding modem firmware */
	phys_addr_t mpss_reloc;		/* actual load address (relocation) */
	void *mpss_region;		/* WC mapping of that carveout */
	size_t mpss_size;
};
132 | ||
/* Indices into q6v5::supply, matching the names set in q6v5_regulator_init() */
enum {
	Q6V5_SUPPLY_CX,
	Q6V5_SUPPLY_MX,
	Q6V5_SUPPLY_MSS,
	Q6V5_SUPPLY_PLL,
};
139 | ||
140 | static int q6v5_regulator_init(struct q6v5 *qproc) | |
141 | { | |
142 | int ret; | |
143 | ||
144 | qproc->supply[Q6V5_SUPPLY_CX].supply = "cx"; | |
145 | qproc->supply[Q6V5_SUPPLY_MX].supply = "mx"; | |
146 | qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss"; | |
147 | qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll"; | |
148 | ||
149 | ret = devm_regulator_bulk_get(qproc->dev, | |
150 | ARRAY_SIZE(qproc->supply), qproc->supply); | |
151 | if (ret < 0) { | |
152 | dev_err(qproc->dev, "failed to get supplies\n"); | |
153 | return ret; | |
154 | } | |
155 | ||
156 | regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000); | |
157 | regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000); | |
158 | regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000); | |
159 | ||
160 | return 0; | |
161 | } | |
162 | ||
163 | static int q6v5_regulator_enable(struct q6v5 *qproc) | |
164 | { | |
165 | struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; | |
166 | struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; | |
167 | int ret; | |
168 | ||
169 | /* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */ | |
170 | ||
171 | ret = regulator_set_voltage(mx, 1050000, INT_MAX); | |
172 | if (ret) | |
173 | return ret; | |
174 | ||
175 | regulator_set_voltage(mss, 1000000, 1150000); | |
176 | ||
177 | return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply); | |
178 | } | |
179 | ||
180 | static void q6v5_regulator_disable(struct q6v5 *qproc) | |
181 | { | |
182 | struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; | |
183 | struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; | |
184 | ||
185 | /* TODO: Q6V5_SUPPLY_CX corner votes should be released */ | |
186 | ||
187 | regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply); | |
188 | regulator_set_voltage(mx, 0, INT_MAX); | |
189 | regulator_set_voltage(mss, 0, 1150000); | |
190 | } | |
191 | ||
192 | static int q6v5_load(struct rproc *rproc, const struct firmware *fw) | |
193 | { | |
194 | struct q6v5 *qproc = rproc->priv; | |
195 | ||
196 | memcpy(qproc->mba_region, fw->data, fw->size); | |
197 | ||
198 | return 0; | |
199 | } | |
200 | ||
/* Custom fw ops: MBA image is loaded raw, resource table comes from the mdt */
static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = qcom_mdt_find_rsc_table,
	.load = q6v5_load,
};
205 | ||
206 | static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) | |
207 | { | |
208 | unsigned long timeout; | |
209 | s32 val; | |
210 | ||
211 | timeout = jiffies + msecs_to_jiffies(ms); | |
212 | for (;;) { | |
213 | val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); | |
214 | if (val) | |
215 | break; | |
216 | ||
217 | if (time_after(jiffies, timeout)) | |
218 | return -ETIMEDOUT; | |
219 | ||
220 | msleep(1); | |
221 | } | |
222 | ||
223 | return val; | |
224 | } | |
225 | ||
226 | static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) | |
227 | { | |
228 | ||
229 | unsigned long timeout; | |
230 | s32 val; | |
231 | ||
232 | timeout = jiffies + msecs_to_jiffies(ms); | |
233 | for (;;) { | |
234 | val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); | |
235 | if (val < 0) | |
236 | break; | |
237 | ||
238 | if (!status && val) | |
239 | break; | |
240 | else if (status && val == status) | |
241 | break; | |
242 | ||
243 | if (time_after(jiffies, timeout)) | |
244 | return -ETIMEDOUT; | |
245 | ||
246 | msleep(1); | |
247 | } | |
248 | ||
249 | return val; | |
250 | } | |
251 | ||
/*
 * q6v5proc_reset() - power up the QDSP6 core and release it into the PBL
 * @qproc:	q6v5 context
 *
 * Sequences the QDSP6SS block: assert resets, switch on the power block
 * headswitch, power the memories bank by bank, remove the IO clamp, release
 * the core reset, enable the core clock and finally clear the stop bit.
 * Then waits for the PBL to report its boot status over the RMB.
 *
 * Return: 0 on RMB_PBL_SUCCESS, -ETIMEDOUT or -EINVAL otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait up to 1s for the PBL to report its boot status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
315 | ||
/*
 * q6v5proc_halt_axi_port() - halt one AXI port via its halt register block
 * @qproc:	q6v5 context (used for error reporting only)
 * @halt_map:	syscon regmap containing the halt registers
 * @offset:	base offset of this port's halt block within @halt_map
 *
 * Skips ports that already report idle; otherwise asserts the halt request,
 * polls for the acknowledgment for up to HALT_ACK_TIMEOUT_MS and logs an
 * error if the port never went idle.  Best-effort: no error is returned.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
349 | ||
/*
 * q6v5_mpss_init_image() - hand the mdt header to the MBA for authentication
 * @qproc:	q6v5 context
 * @fw:		the mdt firmware, whose data holds the image header
 *
 * Copies the header into a physically contiguous DMA buffer, publishes its
 * address in RMB_PMI_META_DATA_REG, issues the "metadata ready" command and
 * waits up to 1s for the MBA to report header authentication success.
 *
 * Return: 0 on success, negative errno or MBA error status on failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t phys;
	void *ptr;
	int ret;

	/* The MBA reads the header by physical address: must be contiguous */
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Buffer is only needed for the duration of the authentication */
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);

	return ret < 0 ? ret : 0;
}
379 | ||
380 | static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw) | |
381 | { | |
382 | const struct elf32_phdr *phdrs; | |
383 | const struct elf32_phdr *phdr; | |
384 | struct elf32_hdr *ehdr; | |
385 | phys_addr_t boot_addr; | |
386 | phys_addr_t fw_addr; | |
387 | bool relocate; | |
388 | size_t size; | |
389 | u32 val; | |
390 | int ret; | |
391 | int i; | |
392 | ||
393 | ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate); | |
394 | if (ret) { | |
395 | dev_err(qproc->dev, "failed to parse mdt header\n"); | |
396 | return ret; | |
397 | } | |
398 | ||
399 | if (relocate) | |
400 | boot_addr = qproc->mpss_phys; | |
401 | else | |
402 | boot_addr = fw_addr; | |
403 | ||
404 | ehdr = (struct elf32_hdr *)fw->data; | |
405 | phdrs = (struct elf32_phdr *)(ehdr + 1); | |
406 | for (i = 0; i < ehdr->e_phnum; i++, phdr++) { | |
407 | phdr = &phdrs[i]; | |
408 | ||
409 | if (phdr->p_type != PT_LOAD) | |
410 | continue; | |
411 | ||
412 | if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) | |
413 | continue; | |
414 | ||
415 | if (!phdr->p_memsz) | |
416 | continue; | |
417 | ||
418 | size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); | |
419 | if (!size) { | |
420 | writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); | |
421 | writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); | |
422 | } | |
423 | ||
424 | size += phdr->p_memsz; | |
425 | writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); | |
426 | } | |
427 | ||
428 | val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); | |
429 | return val < 0 ? val : 0; | |
430 | } | |
431 | ||
/*
 * q6v5_mpss_load() - load and authenticate the modem firmware via the MBA
 * @qproc:	q6v5 context
 *
 * Requests the mdt firmware, resets the RMB length counter, authenticates
 * the header (q6v5_mpss_init_image), loads the segments into the mpss
 * region (qcom_mdt_load), reports the loaded sizes (q6v5_mpss_validate)
 * and finally waits up to 10s for the MBA to finish authentication.
 *
 * Return: 0 on success, negative errno or MBA error status on failure.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct firmware *fw;
	phys_addr_t fw_addr;
	bool relocate;
	int ret;

	ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
		return ret;
	}

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		goto release_firmware;
	}

	/* Remember the link address so q6v5_da_to_va() can translate */
	if (relocate)
		qproc->mpss_reloc = fw_addr;

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME);
	if (ret)
		goto release_firmware;

	ret = q6v5_mpss_validate(qproc, fw);
	if (ret)
		goto release_firmware;

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}
480 | ||
/*
 * q6v5_start() - rproc .start: boot the MBA, then load and start the modem
 * @rproc:	remoteproc handle
 *
 * Powers the supplies, deasserts the restart line, enables the clocks,
 * points the PBL at the MBA image, resets the core, waits for the MBA to
 * boot, loads the modem firmware and finally waits for the handover
 * interrupt to complete start_done.  On any failure the AXI ports are
 * halted and everything acquired so far is unwound in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		return ret;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = clk_prepare_enable(qproc->ahb_clk);
	if (ret)
		goto assert_reset;

	ret = clk_prepare_enable(qproc->axi_clk);
	if (ret)
		goto disable_ahb_clk;

	ret = clk_prepare_enable(qproc->rom_clk);
	if (ret)
		goto disable_axi_clk;

	/* Tell the PBL where to find the MBA image */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	/* status == 0: accept either of the XPU-unlocked values below */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	/* start_done is completed by the "handover" interrupt */
	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	/* TODO: All done, release the handover resources */

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	clk_disable_unprepare(qproc->rom_clk);
disable_axi_clk:
	clk_disable_unprepare(qproc->axi_clk);
disable_ahb_clk:
	clk_disable_unprepare(qproc->ahb_clk);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc);

	return ret;
}
564 | ||
/*
 * q6v5_stop() - rproc .stop: request the modem to stop, then power down
 * @rproc:	remoteproc handle
 *
 * Raises the "stop" smem state bit and waits up to 5s for the modem to
 * acknowledge (stop-ack or, failing that, the watchdog interrupt completes
 * stop_done since running is already false).  Then halts the AXI ports and
 * tears down resets, clocks and supplies.
 *
 * Return: always 0; a missing ack is only logged.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	/* Cleared first so a watchdog fired by the stop counts as an ack */
	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	clk_disable_unprepare(qproc->rom_clk);
	clk_disable_unprepare(qproc->axi_clk);
	clk_disable_unprepare(qproc->ahb_clk);
	q6v5_regulator_disable(qproc);

	return 0;
}
594 | ||
595 | static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len) | |
596 | { | |
597 | struct q6v5 *qproc = rproc->priv; | |
598 | int offset; | |
599 | ||
600 | offset = da - qproc->mpss_reloc; | |
601 | if (offset < 0 || offset + len > qproc->mpss_size) | |
602 | return NULL; | |
603 | ||
604 | return qproc->mpss_region + offset; | |
605 | } | |
606 | ||
/* rproc lifecycle ops; firmware loading itself is handled via q6v5_fw_ops */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};
612 | ||
613 | static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev) | |
614 | { | |
615 | struct q6v5 *qproc = dev; | |
616 | size_t len; | |
617 | char *msg; | |
618 | ||
619 | /* Sometimes the stop triggers a watchdog rather than a stop-ack */ | |
620 | if (!qproc->running) { | |
621 | complete(&qproc->stop_done); | |
622 | return IRQ_HANDLED; | |
623 | } | |
624 | ||
625 | msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len); | |
626 | if (!IS_ERR(msg) && len > 0 && msg[0]) | |
627 | dev_err(qproc->dev, "watchdog received: %s\n", msg); | |
628 | else | |
629 | dev_err(qproc->dev, "watchdog without message\n"); | |
630 | ||
631 | rproc_report_crash(qproc->rproc, RPROC_WATCHDOG); | |
632 | ||
633 | if (!IS_ERR(msg)) | |
634 | msg[0] = '\0'; | |
635 | ||
636 | return IRQ_HANDLED; | |
637 | } | |
638 | ||
639 | static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev) | |
640 | { | |
641 | struct q6v5 *qproc = dev; | |
642 | size_t len; | |
643 | char *msg; | |
644 | ||
645 | msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len); | |
646 | if (!IS_ERR(msg) && len > 0 && msg[0]) | |
647 | dev_err(qproc->dev, "fatal error received: %s\n", msg); | |
648 | else | |
649 | dev_err(qproc->dev, "fatal error without message\n"); | |
650 | ||
651 | rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR); | |
652 | ||
653 | if (!IS_ERR(msg)) | |
654 | msg[0] = '\0'; | |
655 | ||
656 | return IRQ_HANDLED; | |
657 | } | |
658 | ||
659 | static irqreturn_t q6v5_handover_interrupt(int irq, void *dev) | |
660 | { | |
661 | struct q6v5 *qproc = dev; | |
662 | ||
663 | complete(&qproc->start_done); | |
664 | return IRQ_HANDLED; | |
665 | } | |
666 | ||
667 | static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev) | |
668 | { | |
669 | struct q6v5 *qproc = dev; | |
670 | ||
671 | complete(&qproc->stop_done); | |
672 | return IRQ_HANDLED; | |
673 | } | |
674 | ||
/*
 * q6v5_init_mem() - map the register regions and resolve the halt registers
 * @qproc:	q6v5 context
 * @pdev:	the backing platform device
 *
 * Maps the "qdsp6" and "rmb" MEM resources and parses the three-cell
 * "qcom,halt-regs" property: the phandle target is a syscon providing
 * halt_map, the three arguments are the q6/modem/nc halt block offsets.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base)) {
		dev_err(qproc->dev, "failed to get qdsp6_base\n");
		return PTR_ERR(qproc->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base)) {
		dev_err(qproc->dev, "failed to get rmb_base\n");
		return PTR_ERR(qproc->rmb_base);
	}

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
713 | ||
714 | static int q6v5_init_clocks(struct q6v5 *qproc) | |
715 | { | |
716 | qproc->ahb_clk = devm_clk_get(qproc->dev, "iface"); | |
717 | if (IS_ERR(qproc->ahb_clk)) { | |
718 | dev_err(qproc->dev, "failed to get iface clock\n"); | |
719 | return PTR_ERR(qproc->ahb_clk); | |
720 | } | |
721 | ||
722 | qproc->axi_clk = devm_clk_get(qproc->dev, "bus"); | |
723 | if (IS_ERR(qproc->axi_clk)) { | |
724 | dev_err(qproc->dev, "failed to get bus clock\n"); | |
725 | return PTR_ERR(qproc->axi_clk); | |
726 | } | |
727 | ||
728 | qproc->rom_clk = devm_clk_get(qproc->dev, "mem"); | |
729 | if (IS_ERR(qproc->rom_clk)) { | |
730 | dev_err(qproc->dev, "failed to get mem clock\n"); | |
731 | return PTR_ERR(qproc->rom_clk); | |
732 | } | |
733 | ||
734 | return 0; | |
735 | } | |
736 | ||
737 | static int q6v5_init_reset(struct q6v5 *qproc) | |
738 | { | |
739 | qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL); | |
740 | if (IS_ERR(qproc->mss_restart)) { | |
741 | dev_err(qproc->dev, "failed to acquire mss restart\n"); | |
742 | return PTR_ERR(qproc->mss_restart); | |
743 | } | |
744 | ||
745 | return 0; | |
746 | } | |
747 | ||
748 | static int q6v5_request_irq(struct q6v5 *qproc, | |
749 | struct platform_device *pdev, | |
750 | const char *name, | |
751 | irq_handler_t thread_fn) | |
752 | { | |
753 | int ret; | |
754 | ||
755 | ret = platform_get_irq_byname(pdev, name); | |
756 | if (ret < 0) { | |
757 | dev_err(&pdev->dev, "no %s IRQ defined\n", name); | |
758 | return ret; | |
759 | } | |
760 | ||
761 | ret = devm_request_threaded_irq(&pdev->dev, ret, | |
762 | NULL, thread_fn, | |
763 | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | |
764 | "q6v5", qproc); | |
765 | if (ret) | |
766 | dev_err(&pdev->dev, "request %s IRQ failed\n", name); | |
767 | ||
768 | return ret; | |
769 | } | |
770 | ||
771 | static int q6v5_alloc_memory_region(struct q6v5 *qproc) | |
772 | { | |
773 | struct device_node *child; | |
774 | struct device_node *node; | |
775 | struct resource r; | |
776 | int ret; | |
777 | ||
778 | child = of_get_child_by_name(qproc->dev->of_node, "mba"); | |
779 | node = of_parse_phandle(child, "memory-region", 0); | |
780 | ret = of_address_to_resource(node, 0, &r); | |
781 | if (ret) { | |
782 | dev_err(qproc->dev, "unable to resolve mba region\n"); | |
783 | return ret; | |
784 | } | |
785 | ||
786 | qproc->mba_phys = r.start; | |
787 | qproc->mba_size = resource_size(&r); | |
788 | qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); | |
789 | if (!qproc->mba_region) { | |
790 | dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", | |
791 | &r.start, qproc->mba_size); | |
792 | return -EBUSY; | |
793 | } | |
794 | ||
795 | child = of_get_child_by_name(qproc->dev->of_node, "mpss"); | |
796 | node = of_parse_phandle(child, "memory-region", 0); | |
797 | ret = of_address_to_resource(node, 0, &r); | |
798 | if (ret) { | |
799 | dev_err(qproc->dev, "unable to resolve mpss region\n"); | |
800 | return ret; | |
801 | } | |
802 | ||
803 | qproc->mpss_phys = qproc->mpss_reloc = r.start; | |
804 | qproc->mpss_size = resource_size(&r); | |
805 | qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); | |
806 | if (!qproc->mpss_region) { | |
807 | dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", | |
808 | &r.start, qproc->mpss_size); | |
809 | return -EBUSY; | |
810 | } | |
811 | ||
812 | return 0; | |
813 | } | |
814 | ||
815 | static int q6v5_probe(struct platform_device *pdev) | |
816 | { | |
817 | struct q6v5 *qproc; | |
818 | struct rproc *rproc; | |
819 | int ret; | |
820 | ||
821 | rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, | |
822 | MBA_FIRMWARE_NAME, sizeof(*qproc)); | |
823 | if (!rproc) { | |
824 | dev_err(&pdev->dev, "failed to allocate rproc\n"); | |
825 | return -ENOMEM; | |
826 | } | |
827 | ||
828 | rproc->fw_ops = &q6v5_fw_ops; | |
829 | ||
830 | qproc = (struct q6v5 *)rproc->priv; | |
831 | qproc->dev = &pdev->dev; | |
832 | qproc->rproc = rproc; | |
833 | platform_set_drvdata(pdev, qproc); | |
834 | ||
835 | init_completion(&qproc->start_done); | |
836 | init_completion(&qproc->stop_done); | |
837 | ||
838 | ret = q6v5_init_mem(qproc, pdev); | |
839 | if (ret) | |
840 | goto free_rproc; | |
841 | ||
842 | ret = q6v5_alloc_memory_region(qproc); | |
843 | if (ret) | |
844 | goto free_rproc; | |
845 | ||
846 | ret = q6v5_init_clocks(qproc); | |
847 | if (ret) | |
848 | goto free_rproc; | |
849 | ||
850 | ret = q6v5_regulator_init(qproc); | |
851 | if (ret) | |
852 | goto free_rproc; | |
853 | ||
854 | ret = q6v5_init_reset(qproc); | |
855 | if (ret) | |
856 | goto free_rproc; | |
857 | ||
858 | ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt); | |
859 | if (ret < 0) | |
860 | goto free_rproc; | |
861 | ||
862 | ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt); | |
863 | if (ret < 0) | |
864 | goto free_rproc; | |
865 | ||
866 | ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt); | |
867 | if (ret < 0) | |
868 | goto free_rproc; | |
869 | ||
870 | ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt); | |
871 | if (ret < 0) | |
872 | goto free_rproc; | |
873 | ||
874 | qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit); | |
875 | if (IS_ERR(qproc->state)) | |
876 | goto free_rproc; | |
877 | ||
878 | ret = rproc_add(rproc); | |
879 | if (ret) | |
880 | goto free_rproc; | |
881 | ||
882 | return 0; | |
883 | ||
884 | free_rproc: | |
885 | rproc_put(rproc); | |
886 | ||
887 | return ret; | |
888 | } | |
889 | ||
890 | static int q6v5_remove(struct platform_device *pdev) | |
891 | { | |
892 | struct q6v5 *qproc = platform_get_drvdata(pdev); | |
893 | ||
894 | rproc_del(qproc->rproc); | |
895 | rproc_put(qproc->rproc); | |
896 | ||
897 | return 0; | |
898 | } | |
899 | ||
900 | static const struct of_device_id q6v5_of_match[] = { | |
901 | { .compatible = "qcom,q6v5-pil", }, | |
902 | { }, | |
903 | }; | |
904 | ||
/* Platform driver glue; probe/remove handle the full rproc lifecycle */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");