Merge branch 'stable/for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / drivers / hwtracing / coresight / coresight-etm3x.c
1 /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/device.h>
18 #include <linux/io.h>
19 #include <linux/err.h>
20 #include <linux/fs.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/smp.h>
24 #include <linux/sysfs.h>
25 #include <linux/stat.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/cpu.h>
28 #include <linux/of.h>
29 #include <linux/coresight.h>
30 #include <linux/amba/bus.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/clk.h>
34 #include <asm/sections.h>
35
36 #include "coresight-etm.h"
37
/* Non-zero (set via module parameter) enables tracing at boot time */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETM/PTM currently registered */
static int etm_count;
/* Per-CPU lookup table of registered tracers, indexed by CPU number */
static struct etm_drvdata *etmdrvdata[NR_CPUS];
44
45 static inline void etm_writel(struct etm_drvdata *drvdata,
46 u32 val, u32 off)
47 {
48 if (drvdata->use_cp14) {
49 if (etm_writel_cp14(off, val)) {
50 dev_err(drvdata->dev,
51 "invalid CP14 access to ETM reg: %#x", off);
52 }
53 } else {
54 writel_relaxed(val, drvdata->base + off);
55 }
56 }
57
58 static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
59 {
60 u32 val;
61
62 if (drvdata->use_cp14) {
63 if (etm_readl_cp14(off, &val)) {
64 dev_err(drvdata->dev,
65 "invalid CP14 access to ETM reg: %#x", off);
66 }
67 } else {
68 val = readl_relaxed(drvdata->base + off);
69 }
70
71 return val;
72 }
73
74 /*
75 * Memory mapped writes to clear os lock are not supported on some processors
76 * and OS lock must be unlocked before any memory mapped access on such
77 * processors, otherwise memory mapped reads/writes will be invalid.
78 */
/* Takes a void * so it can be run on the owning CPU via smp cross-calls. */
static void etm_os_unlock(void *info)
{
	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	/* Make sure the unlock has taken effect before further accesses */
	isb();
}
86
/* Set the power-down bit in ETMCR, gating the ETM trace logic. */
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}
98
/* Clear the power-down bit in ETMCR so the trace logic is clocked. */
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
110
/*
 * Assert the power-up request in ETMPDCR. Note ETMPDCR is always accessed
 * through the memory mapped window, never via CP14.
 */
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
122
/* Deassert the power-up request in ETMPDCR (memory mapped access only). */
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
134
135 /**
136 * coresight_timeout_etm - loop until a bit has changed to a specific state.
137 * @drvdata: etm's private data structure.
138 * @offset: address of a register, starting from @addr.
139 * @position: the position of the bit of interest.
140 * @value: the value the bit should have.
141 *
142 * Basically the same as @coresight_timeout except for the register access
143 * method where we have to account for CP14 configurations.
144
145 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
146 * TIMEOUT_US has elapsed, which ever happens first.
147 */
148
149 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
150 int position, int value)
151 {
152 int i;
153 u32 val;
154
155 for (i = TIMEOUT_US; i > 0; i--) {
156 val = etm_readl(drvdata, offset);
157 /* Waiting on the bit to go from 0 to 1 */
158 if (value) {
159 if (val & BIT(position))
160 return 0;
161 /* Waiting on the bit to go from 1 to 0 */
162 } else {
163 if (!(val & BIT(position)))
164 return 0;
165 }
166
167 /*
168 * Delay is arbitrary - the specification doesn't say how long
169 * we are expected to wait. Extra check required to make sure
170 * we don't wait needlessly on the last iteration.
171 */
172 if (i - 1)
173 udelay(1);
174 }
175
176 return -EAGAIN;
177 }
178
179
/*
 * Put the ETM in programming mode (set ETMCR.Prog) and wait until ETMSR
 * confirms it. Configuration registers must only be written in this state.
 */
static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(drvdata->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}
198
/*
 * Take the ETM out of programming mode (clear ETMCR.Prog) and wait until
 * ETMSR confirms it - tracing may (re)start once this returns.
 */
static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(drvdata->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}
217
218 static void etm_set_default(struct etm_drvdata *drvdata)
219 {
220 int i;
221
222 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
223 drvdata->enable_event = ETM_HARD_WIRE_RES_A;
224
225 drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
226 drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
227 drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
228 drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
229 drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
230 drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
231 drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
232
233 for (i = 0; i < drvdata->nr_cntr; i++) {
234 drvdata->cntr_rld_val[i] = 0x0;
235 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
236 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
237 drvdata->cntr_val[i] = 0x0;
238 }
239
240 drvdata->seq_curr_state = 0x0;
241 drvdata->ctxid_idx = 0x0;
242 for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
243 drvdata->ctxid_pid[i] = 0x0;
244 drvdata->ctxid_vpid[i] = 0x0;
245 }
246
247 drvdata->ctxid_mask = 0x0;
248 }
249
/*
 * etm_enable_hw - program and turn on the ETM/PTM.
 *
 * Runs on the CPU owning the tracer (see smp_call_function_single() in
 * etm_enable()) so CP14 accesses reach the right coprocessor.
 */
static void etm_enable_hw(void *info)
{
	int i;
	u32 etmcr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	/* Must be in programming mode while registers are configured */
	etm_set_prog(drvdata);

	/* Preserve only the power-down and programming bits of ETMCR */
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
	etmcr |= drvdata->port_size;
	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
	/* Address comparator values and access types */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
	}
	/* Counters: reload values, enable/reload events, current values */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, drvdata->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
	}
	/* Sequencer transition events and current state */
	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	/* Ensures trace output is enabled from this ETM */
	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

	/* Leave programming mode - tracing starts from here */
	etm_clr_prog(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
317
/*
 * etm_trace_id - report the trace ID this tracer stamps on its packets.
 *
 * If the tracer isn't running, return the cached value. Otherwise read
 * ETMTRACEIDR from the hardware, taking a runtime PM reference and the
 * spinlock so the device is powered and not reconfigured underneath us.
 *
 * Return: the trace ID, masked with ETM_TRACEID_MASK when read from HW.
 */
static int etm_trace_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata->enable)
		return drvdata->traceid;
	pm_runtime_get_sync(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(csdev->dev.parent);

	return trace_id;
}
339
/*
 * etm_enable - turn on tracing for this source.
 *
 * Return: 0 on success, or the error from smp_call_function_single() if
 * programming the ETM on its CPU failed (the PM reference is dropped on
 * that path).
 */
static int etm_enable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(csdev->dev.parent);
	spin_lock(&drvdata->spinlock);

	/*
	 * Configure the ETM only if the CPU is online. If it isn't online
	 * hw configuration will take place when 'CPU_STARTING' is received
	 * in @etm_cpu_callback.
	 */
	if (cpu_online(drvdata->cpu)) {
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw, drvdata, 1);
		if (ret)
			goto err;
	}

	drvdata->enable = true;
	/* NOTE(review): presumably records "enabled at least once" - confirm */
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(csdev->dev.parent);
	return ret;
}
372
/*
 * etm_disable_hw - stop tracing and power down the ETM, caching sequencer
 * and counter state for post trace analysis.
 *
 * Runs on the CPU owning the tracer (via smp_call_function_single() in
 * etm_disable()) so register writes occur while the CPU is powered.
 */
static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Program trace enable to low by using always false event */
	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);

	/* Read back sequencer and counters for post trace analysis */
	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
395
/* etm_disable - turn off tracing for this source and drop its PM reference. */
static void etm_disable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();
	pm_runtime_put(csdev->dev.parent);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}
422
/* Source operations exposed to the coresight framework */
static const struct coresight_ops_source etm_source_ops = {
	.trace_id = etm_trace_id,
	.enable = etm_enable,
	.disable = etm_disable,
};
428
/* This component only acts as a trace source */
static const struct coresight_ops etm_cs_ops = {
	.source_ops = &etm_source_ops,
};
432
433 static ssize_t nr_addr_cmp_show(struct device *dev,
434 struct device_attribute *attr, char *buf)
435 {
436 unsigned long val;
437 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
438
439 val = drvdata->nr_addr_cmp;
440 return sprintf(buf, "%#lx\n", val);
441 }
442 static DEVICE_ATTR_RO(nr_addr_cmp);
443
444 static ssize_t nr_cntr_show(struct device *dev,
445 struct device_attribute *attr, char *buf)
446 { unsigned long val;
447 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
448
449 val = drvdata->nr_cntr;
450 return sprintf(buf, "%#lx\n", val);
451 }
452 static DEVICE_ATTR_RO(nr_cntr);
453
454 static ssize_t nr_ctxid_cmp_show(struct device *dev,
455 struct device_attribute *attr, char *buf)
456 {
457 unsigned long val;
458 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
459
460 val = drvdata->nr_ctxid_cmp;
461 return sprintf(buf, "%#lx\n", val);
462 }
463 static DEVICE_ATTR_RO(nr_ctxid_cmp);
464
/*
 * Sysfs: read the live ETM status register. A runtime PM reference and
 * the spinlock keep the hardware powered and stable during the read.
 */
static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);
484
/*
 * Sysfs: writing a non-zero value resets the cached trace configuration
 * (mode, control, comparators, counters, sequencer, context IDs) back to
 * defaults. Writing zero is a no-op.
 */
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		drvdata->mode = ETM_MODE_EXCLUDE;
		drvdata->ctrl = 0x0;
		drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
		drvdata->startstop_ctrl = 0x0;
		drvdata->addr_idx = 0x0;
		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
			drvdata->addr_val[i] = 0x0;
			drvdata->addr_acctype[i] = 0x0;
			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
		}
		drvdata->cntr_idx = 0x0;

		/* Remaining fields are covered by the common default setter */
		etm_set_default(drvdata);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);
518
519 static ssize_t mode_show(struct device *dev,
520 struct device_attribute *attr, char *buf)
521 {
522 unsigned long val;
523 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
524
525 val = drvdata->mode;
526 return sprintf(buf, "%#lx\n", val);
527 }
528
529 static ssize_t mode_store(struct device *dev,
530 struct device_attribute *attr,
531 const char *buf, size_t size)
532 {
533 int ret;
534 unsigned long val;
535 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
536
537 ret = kstrtoul(buf, 16, &val);
538 if (ret)
539 return ret;
540
541 spin_lock(&drvdata->spinlock);
542 drvdata->mode = val & ETM_MODE_ALL;
543
544 if (drvdata->mode & ETM_MODE_EXCLUDE)
545 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
546 else
547 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
548
549 if (drvdata->mode & ETM_MODE_CYCACC)
550 drvdata->ctrl |= ETMCR_CYC_ACC;
551 else
552 drvdata->ctrl &= ~ETMCR_CYC_ACC;
553
554 if (drvdata->mode & ETM_MODE_STALL) {
555 if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
556 dev_warn(drvdata->dev, "stall mode not supported\n");
557 ret = -EINVAL;
558 goto err_unlock;
559 }
560 drvdata->ctrl |= ETMCR_STALL_MODE;
561 } else
562 drvdata->ctrl &= ~ETMCR_STALL_MODE;
563
564 if (drvdata->mode & ETM_MODE_TIMESTAMP) {
565 if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
566 dev_warn(drvdata->dev, "timestamp not supported\n");
567 ret = -EINVAL;
568 goto err_unlock;
569 }
570 drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
571 } else
572 drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
573
574 if (drvdata->mode & ETM_MODE_CTXID)
575 drvdata->ctrl |= ETMCR_CTXID_SIZE;
576 else
577 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
578 spin_unlock(&drvdata->spinlock);
579
580 return size;
581
582 err_unlock:
583 spin_unlock(&drvdata->spinlock);
584 return ret;
585 }
586 static DEVICE_ATTR_RW(mode);
587
588 static ssize_t trigger_event_show(struct device *dev,
589 struct device_attribute *attr, char *buf)
590 {
591 unsigned long val;
592 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
593
594 val = drvdata->trigger_event;
595 return sprintf(buf, "%#lx\n", val);
596 }
597
598 static ssize_t trigger_event_store(struct device *dev,
599 struct device_attribute *attr,
600 const char *buf, size_t size)
601 {
602 int ret;
603 unsigned long val;
604 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
605
606 ret = kstrtoul(buf, 16, &val);
607 if (ret)
608 return ret;
609
610 drvdata->trigger_event = val & ETM_EVENT_MASK;
611
612 return size;
613 }
614 static DEVICE_ATTR_RW(trigger_event);
615
616 static ssize_t enable_event_show(struct device *dev,
617 struct device_attribute *attr, char *buf)
618 {
619 unsigned long val;
620 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
621
622 val = drvdata->enable_event;
623 return sprintf(buf, "%#lx\n", val);
624 }
625
626 static ssize_t enable_event_store(struct device *dev,
627 struct device_attribute *attr,
628 const char *buf, size_t size)
629 {
630 int ret;
631 unsigned long val;
632 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
633
634 ret = kstrtoul(buf, 16, &val);
635 if (ret)
636 return ret;
637
638 drvdata->enable_event = val & ETM_EVENT_MASK;
639
640 return size;
641 }
642 static DEVICE_ATTR_RW(enable_event);
643
644 static ssize_t fifofull_level_show(struct device *dev,
645 struct device_attribute *attr, char *buf)
646 {
647 unsigned long val;
648 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
649
650 val = drvdata->fifofull_level;
651 return sprintf(buf, "%#lx\n", val);
652 }
653
654 static ssize_t fifofull_level_store(struct device *dev,
655 struct device_attribute *attr,
656 const char *buf, size_t size)
657 {
658 int ret;
659 unsigned long val;
660 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
661
662 ret = kstrtoul(buf, 16, &val);
663 if (ret)
664 return ret;
665
666 drvdata->fifofull_level = val;
667
668 return size;
669 }
670 static DEVICE_ATTR_RW(fifofull_level);
671
672 static ssize_t addr_idx_show(struct device *dev,
673 struct device_attribute *attr, char *buf)
674 {
675 unsigned long val;
676 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
677
678 val = drvdata->addr_idx;
679 return sprintf(buf, "%#lx\n", val);
680 }
681
682 static ssize_t addr_idx_store(struct device *dev,
683 struct device_attribute *attr,
684 const char *buf, size_t size)
685 {
686 int ret;
687 unsigned long val;
688 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
689
690 ret = kstrtoul(buf, 16, &val);
691 if (ret)
692 return ret;
693
694 if (val >= drvdata->nr_addr_cmp)
695 return -EINVAL;
696
697 /*
698 * Use spinlock to ensure index doesn't change while it gets
699 * dereferenced multiple times within a spinlock block elsewhere.
700 */
701 spin_lock(&drvdata->spinlock);
702 drvdata->addr_idx = val;
703 spin_unlock(&drvdata->spinlock);
704
705 return size;
706 }
707 static DEVICE_ATTR_RW(addr_idx);
708
709 static ssize_t addr_single_show(struct device *dev,
710 struct device_attribute *attr, char *buf)
711 {
712 u8 idx;
713 unsigned long val;
714 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
715
716 spin_lock(&drvdata->spinlock);
717 idx = drvdata->addr_idx;
718 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
719 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
720 spin_unlock(&drvdata->spinlock);
721 return -EINVAL;
722 }
723
724 val = drvdata->addr_val[idx];
725 spin_unlock(&drvdata->spinlock);
726
727 return sprintf(buf, "%#lx\n", val);
728 }
729
730 static ssize_t addr_single_store(struct device *dev,
731 struct device_attribute *attr,
732 const char *buf, size_t size)
733 {
734 u8 idx;
735 int ret;
736 unsigned long val;
737 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
738
739 ret = kstrtoul(buf, 16, &val);
740 if (ret)
741 return ret;
742
743 spin_lock(&drvdata->spinlock);
744 idx = drvdata->addr_idx;
745 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
746 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
747 spin_unlock(&drvdata->spinlock);
748 return -EINVAL;
749 }
750
751 drvdata->addr_val[idx] = val;
752 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
753 spin_unlock(&drvdata->spinlock);
754
755 return size;
756 }
757 static DEVICE_ATTR_RW(addr_single);
758
/*
 * Sysfs: report the address pair of the selected range comparator.
 *
 * Range comparators consume an even/odd pair, so the current index must
 * be even and both comparators must be unused or already programmed as a
 * range; otherwise -EPERM.
 */
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = drvdata->addr_val[idx];
	val2 = drvdata->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}
786
/*
 * Sysfs: program the selected even/odd comparator pair as an address
 * range ("low high" in hex). The index must be even and both comparators
 * unused or already a range; the low address must not exceed the high.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/* Enable this pair in ETMTECR1 - one include bit per pair, presumably;
	 * verify against the ETM architecture reference */
	drvdata->enable_ctrl1 |= (1 << (idx/2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);
825
826 static ssize_t addr_start_show(struct device *dev,
827 struct device_attribute *attr, char *buf)
828 {
829 u8 idx;
830 unsigned long val;
831 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
832
833 spin_lock(&drvdata->spinlock);
834 idx = drvdata->addr_idx;
835 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
836 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
837 spin_unlock(&drvdata->spinlock);
838 return -EPERM;
839 }
840
841 val = drvdata->addr_val[idx];
842 spin_unlock(&drvdata->spinlock);
843
844 return sprintf(buf, "%#lx\n", val);
845 }
846
847 static ssize_t addr_start_store(struct device *dev,
848 struct device_attribute *attr,
849 const char *buf, size_t size)
850 {
851 u8 idx;
852 int ret;
853 unsigned long val;
854 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
855
856 ret = kstrtoul(buf, 16, &val);
857 if (ret)
858 return ret;
859
860 spin_lock(&drvdata->spinlock);
861 idx = drvdata->addr_idx;
862 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
863 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
864 spin_unlock(&drvdata->spinlock);
865 return -EPERM;
866 }
867
868 drvdata->addr_val[idx] = val;
869 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
870 drvdata->startstop_ctrl |= (1 << idx);
871 drvdata->enable_ctrl1 |= BIT(25);
872 spin_unlock(&drvdata->spinlock);
873
874 return size;
875 }
876 static DEVICE_ATTR_RW(addr_start);
877
878 static ssize_t addr_stop_show(struct device *dev,
879 struct device_attribute *attr, char *buf)
880 {
881 u8 idx;
882 unsigned long val;
883 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
884
885 spin_lock(&drvdata->spinlock);
886 idx = drvdata->addr_idx;
887 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
888 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
889 spin_unlock(&drvdata->spinlock);
890 return -EPERM;
891 }
892
893 val = drvdata->addr_val[idx];
894 spin_unlock(&drvdata->spinlock);
895
896 return sprintf(buf, "%#lx\n", val);
897 }
898
899 static ssize_t addr_stop_store(struct device *dev,
900 struct device_attribute *attr,
901 const char *buf, size_t size)
902 {
903 u8 idx;
904 int ret;
905 unsigned long val;
906 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
907
908 ret = kstrtoul(buf, 16, &val);
909 if (ret)
910 return ret;
911
912 spin_lock(&drvdata->spinlock);
913 idx = drvdata->addr_idx;
914 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
915 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
916 spin_unlock(&drvdata->spinlock);
917 return -EPERM;
918 }
919
920 drvdata->addr_val[idx] = val;
921 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
922 drvdata->startstop_ctrl |= (1 << (idx + 16));
923 drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
924 spin_unlock(&drvdata->spinlock);
925
926 return size;
927 }
928 static DEVICE_ATTR_RW(addr_stop);
929
930 static ssize_t addr_acctype_show(struct device *dev,
931 struct device_attribute *attr, char *buf)
932 {
933 unsigned long val;
934 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
935
936 spin_lock(&drvdata->spinlock);
937 val = drvdata->addr_acctype[drvdata->addr_idx];
938 spin_unlock(&drvdata->spinlock);
939
940 return sprintf(buf, "%#lx\n", val);
941 }
942
943 static ssize_t addr_acctype_store(struct device *dev,
944 struct device_attribute *attr,
945 const char *buf, size_t size)
946 {
947 int ret;
948 unsigned long val;
949 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
950
951 ret = kstrtoul(buf, 16, &val);
952 if (ret)
953 return ret;
954
955 spin_lock(&drvdata->spinlock);
956 drvdata->addr_acctype[drvdata->addr_idx] = val;
957 spin_unlock(&drvdata->spinlock);
958
959 return size;
960 }
961 static DEVICE_ATTR_RW(addr_acctype);
962
963 static ssize_t cntr_idx_show(struct device *dev,
964 struct device_attribute *attr, char *buf)
965 {
966 unsigned long val;
967 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
968
969 val = drvdata->cntr_idx;
970 return sprintf(buf, "%#lx\n", val);
971 }
972
973 static ssize_t cntr_idx_store(struct device *dev,
974 struct device_attribute *attr,
975 const char *buf, size_t size)
976 {
977 int ret;
978 unsigned long val;
979 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
980
981 ret = kstrtoul(buf, 16, &val);
982 if (ret)
983 return ret;
984
985 if (val >= drvdata->nr_cntr)
986 return -EINVAL;
987 /*
988 * Use spinlock to ensure index doesn't change while it gets
989 * dereferenced multiple times within a spinlock block elsewhere.
990 */
991 spin_lock(&drvdata->spinlock);
992 drvdata->cntr_idx = val;
993 spin_unlock(&drvdata->spinlock);
994
995 return size;
996 }
997 static DEVICE_ATTR_RW(cntr_idx);
998
999 static ssize_t cntr_rld_val_show(struct device *dev,
1000 struct device_attribute *attr, char *buf)
1001 {
1002 unsigned long val;
1003 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1004
1005 spin_lock(&drvdata->spinlock);
1006 val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1007 spin_unlock(&drvdata->spinlock);
1008
1009 return sprintf(buf, "%#lx\n", val);
1010 }
1011
1012 static ssize_t cntr_rld_val_store(struct device *dev,
1013 struct device_attribute *attr,
1014 const char *buf, size_t size)
1015 {
1016 int ret;
1017 unsigned long val;
1018 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1019
1020 ret = kstrtoul(buf, 16, &val);
1021 if (ret)
1022 return ret;
1023
1024 spin_lock(&drvdata->spinlock);
1025 drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1026 spin_unlock(&drvdata->spinlock);
1027
1028 return size;
1029 }
1030 static DEVICE_ATTR_RW(cntr_rld_val);
1031
1032 static ssize_t cntr_event_show(struct device *dev,
1033 struct device_attribute *attr, char *buf)
1034 {
1035 unsigned long val;
1036 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1037
1038 spin_lock(&drvdata->spinlock);
1039 val = drvdata->cntr_event[drvdata->cntr_idx];
1040 spin_unlock(&drvdata->spinlock);
1041
1042 return sprintf(buf, "%#lx\n", val);
1043 }
1044
1045 static ssize_t cntr_event_store(struct device *dev,
1046 struct device_attribute *attr,
1047 const char *buf, size_t size)
1048 {
1049 int ret;
1050 unsigned long val;
1051 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1052
1053 ret = kstrtoul(buf, 16, &val);
1054 if (ret)
1055 return ret;
1056
1057 spin_lock(&drvdata->spinlock);
1058 drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1059 spin_unlock(&drvdata->spinlock);
1060
1061 return size;
1062 }
1063 static DEVICE_ATTR_RW(cntr_event);
1064
1065 static ssize_t cntr_rld_event_show(struct device *dev,
1066 struct device_attribute *attr, char *buf)
1067 {
1068 unsigned long val;
1069 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1070
1071 spin_lock(&drvdata->spinlock);
1072 val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1073 spin_unlock(&drvdata->spinlock);
1074
1075 return sprintf(buf, "%#lx\n", val);
1076 }
1077
1078 static ssize_t cntr_rld_event_store(struct device *dev,
1079 struct device_attribute *attr,
1080 const char *buf, size_t size)
1081 {
1082 int ret;
1083 unsigned long val;
1084 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1085
1086 ret = kstrtoul(buf, 16, &val);
1087 if (ret)
1088 return ret;
1089
1090 spin_lock(&drvdata->spinlock);
1091 drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1092 spin_unlock(&drvdata->spinlock);
1093
1094 return size;
1095 }
1096 static DEVICE_ATTR_RW(cntr_rld_event);
1097
1098 static ssize_t cntr_val_show(struct device *dev,
1099 struct device_attribute *attr, char *buf)
1100 {
1101 int i, ret = 0;
1102 u32 val;
1103 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1104
1105 if (!drvdata->enable) {
1106 spin_lock(&drvdata->spinlock);
1107 for (i = 0; i < drvdata->nr_cntr; i++)
1108 ret += sprintf(buf, "counter %d: %x\n",
1109 i, drvdata->cntr_val[i]);
1110 spin_unlock(&drvdata->spinlock);
1111 return ret;
1112 }
1113
1114 for (i = 0; i < drvdata->nr_cntr; i++) {
1115 val = etm_readl(drvdata, ETMCNTVRn(i));
1116 ret += sprintf(buf, "counter %d: %x\n", i, val);
1117 }
1118
1119 return ret;
1120 }
1121
1122 static ssize_t cntr_val_store(struct device *dev,
1123 struct device_attribute *attr,
1124 const char *buf, size_t size)
1125 {
1126 int ret;
1127 unsigned long val;
1128 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1129
1130 ret = kstrtoul(buf, 16, &val);
1131 if (ret)
1132 return ret;
1133
1134 spin_lock(&drvdata->spinlock);
1135 drvdata->cntr_val[drvdata->cntr_idx] = val;
1136 spin_unlock(&drvdata->spinlock);
1137
1138 return size;
1139 }
1140 static DEVICE_ATTR_RW(cntr_val);
1141
1142 static ssize_t seq_12_event_show(struct device *dev,
1143 struct device_attribute *attr, char *buf)
1144 {
1145 unsigned long val;
1146 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1147
1148 val = drvdata->seq_12_event;
1149 return sprintf(buf, "%#lx\n", val);
1150 }
1151
1152 static ssize_t seq_12_event_store(struct device *dev,
1153 struct device_attribute *attr,
1154 const char *buf, size_t size)
1155 {
1156 int ret;
1157 unsigned long val;
1158 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1159
1160 ret = kstrtoul(buf, 16, &val);
1161 if (ret)
1162 return ret;
1163
1164 drvdata->seq_12_event = val & ETM_EVENT_MASK;
1165 return size;
1166 }
1167 static DEVICE_ATTR_RW(seq_12_event);
1168
1169 static ssize_t seq_21_event_show(struct device *dev,
1170 struct device_attribute *attr, char *buf)
1171 {
1172 unsigned long val;
1173 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1174
1175 val = drvdata->seq_21_event;
1176 return sprintf(buf, "%#lx\n", val);
1177 }
1178
1179 static ssize_t seq_21_event_store(struct device *dev,
1180 struct device_attribute *attr,
1181 const char *buf, size_t size)
1182 {
1183 int ret;
1184 unsigned long val;
1185 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1186
1187 ret = kstrtoul(buf, 16, &val);
1188 if (ret)
1189 return ret;
1190
1191 drvdata->seq_21_event = val & ETM_EVENT_MASK;
1192 return size;
1193 }
1194 static DEVICE_ATTR_RW(seq_21_event);
1195
1196 static ssize_t seq_23_event_show(struct device *dev,
1197 struct device_attribute *attr, char *buf)
1198 {
1199 unsigned long val;
1200 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1201
1202 val = drvdata->seq_23_event;
1203 return sprintf(buf, "%#lx\n", val);
1204 }
1205
1206 static ssize_t seq_23_event_store(struct device *dev,
1207 struct device_attribute *attr,
1208 const char *buf, size_t size)
1209 {
1210 int ret;
1211 unsigned long val;
1212 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1213
1214 ret = kstrtoul(buf, 16, &val);
1215 if (ret)
1216 return ret;
1217
1218 drvdata->seq_23_event = val & ETM_EVENT_MASK;
1219 return size;
1220 }
1221 static DEVICE_ATTR_RW(seq_23_event);
1222
1223 static ssize_t seq_31_event_show(struct device *dev,
1224 struct device_attribute *attr, char *buf)
1225 {
1226 unsigned long val;
1227 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1228
1229 val = drvdata->seq_31_event;
1230 return sprintf(buf, "%#lx\n", val);
1231 }
1232
1233 static ssize_t seq_31_event_store(struct device *dev,
1234 struct device_attribute *attr,
1235 const char *buf, size_t size)
1236 {
1237 int ret;
1238 unsigned long val;
1239 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1240
1241 ret = kstrtoul(buf, 16, &val);
1242 if (ret)
1243 return ret;
1244
1245 drvdata->seq_31_event = val & ETM_EVENT_MASK;
1246 return size;
1247 }
1248 static DEVICE_ATTR_RW(seq_31_event);
1249
1250 static ssize_t seq_32_event_show(struct device *dev,
1251 struct device_attribute *attr, char *buf)
1252 {
1253 unsigned long val;
1254 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1255
1256 val = drvdata->seq_32_event;
1257 return sprintf(buf, "%#lx\n", val);
1258 }
1259
1260 static ssize_t seq_32_event_store(struct device *dev,
1261 struct device_attribute *attr,
1262 const char *buf, size_t size)
1263 {
1264 int ret;
1265 unsigned long val;
1266 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1267
1268 ret = kstrtoul(buf, 16, &val);
1269 if (ret)
1270 return ret;
1271
1272 drvdata->seq_32_event = val & ETM_EVENT_MASK;
1273 return size;
1274 }
1275 static DEVICE_ATTR_RW(seq_32_event);
1276
1277 static ssize_t seq_13_event_show(struct device *dev,
1278 struct device_attribute *attr, char *buf)
1279 {
1280 unsigned long val;
1281 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1282
1283 val = drvdata->seq_13_event;
1284 return sprintf(buf, "%#lx\n", val);
1285 }
1286
1287 static ssize_t seq_13_event_store(struct device *dev,
1288 struct device_attribute *attr,
1289 const char *buf, size_t size)
1290 {
1291 int ret;
1292 unsigned long val;
1293 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1294
1295 ret = kstrtoul(buf, 16, &val);
1296 if (ret)
1297 return ret;
1298
1299 drvdata->seq_13_event = val & ETM_EVENT_MASK;
1300 return size;
1301 }
1302 static DEVICE_ATTR_RW(seq_13_event);
1303
/*
 * sysfs: current sequencer state.
 *
 * If the ETM is disabled, report the cached software value.  Otherwise the
 * value is read back from the ETMSQR register, which requires powering up
 * the device (pm_runtime_get_sync) and unlocking the CoreSight register
 * file (CS_UNLOCK) around the access, all under the spinlock with
 * interrupts disabled.
 */
static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

/*
 * sysfs: set the sequencer state to program on the next enable.
 * Values above ETM_SEQ_STATE_MAX_VAL are rejected with -EINVAL.
 */
static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	drvdata->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
1348
1349 static ssize_t ctxid_idx_show(struct device *dev,
1350 struct device_attribute *attr, char *buf)
1351 {
1352 unsigned long val;
1353 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1354
1355 val = drvdata->ctxid_idx;
1356 return sprintf(buf, "%#lx\n", val);
1357 }
1358
1359 static ssize_t ctxid_idx_store(struct device *dev,
1360 struct device_attribute *attr,
1361 const char *buf, size_t size)
1362 {
1363 int ret;
1364 unsigned long val;
1365 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1366
1367 ret = kstrtoul(buf, 16, &val);
1368 if (ret)
1369 return ret;
1370
1371 if (val >= drvdata->nr_ctxid_cmp)
1372 return -EINVAL;
1373
1374 /*
1375 * Use spinlock to ensure index doesn't change while it gets
1376 * dereferenced multiple times within a spinlock block elsewhere.
1377 */
1378 spin_lock(&drvdata->spinlock);
1379 drvdata->ctxid_idx = val;
1380 spin_unlock(&drvdata->spinlock);
1381
1382 return size;
1383 }
1384 static DEVICE_ATTR_RW(ctxid_idx);
1385
1386 static ssize_t ctxid_pid_show(struct device *dev,
1387 struct device_attribute *attr, char *buf)
1388 {
1389 unsigned long val;
1390 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1391
1392 spin_lock(&drvdata->spinlock);
1393 val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
1394 spin_unlock(&drvdata->spinlock);
1395
1396 return sprintf(buf, "%#lx\n", val);
1397 }
1398
1399 static ssize_t ctxid_pid_store(struct device *dev,
1400 struct device_attribute *attr,
1401 const char *buf, size_t size)
1402 {
1403 int ret;
1404 unsigned long vpid, pid;
1405 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1406
1407 ret = kstrtoul(buf, 16, &vpid);
1408 if (ret)
1409 return ret;
1410
1411 pid = coresight_vpid_to_pid(vpid);
1412
1413 spin_lock(&drvdata->spinlock);
1414 drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
1415 drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
1416 spin_unlock(&drvdata->spinlock);
1417
1418 return size;
1419 }
1420 static DEVICE_ATTR_RW(ctxid_pid);
1421
1422 static ssize_t ctxid_mask_show(struct device *dev,
1423 struct device_attribute *attr, char *buf)
1424 {
1425 unsigned long val;
1426 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1427
1428 val = drvdata->ctxid_mask;
1429 return sprintf(buf, "%#lx\n", val);
1430 }
1431
1432 static ssize_t ctxid_mask_store(struct device *dev,
1433 struct device_attribute *attr,
1434 const char *buf, size_t size)
1435 {
1436 int ret;
1437 unsigned long val;
1438 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1439
1440 ret = kstrtoul(buf, 16, &val);
1441 if (ret)
1442 return ret;
1443
1444 drvdata->ctxid_mask = val;
1445 return size;
1446 }
1447 static DEVICE_ATTR_RW(ctxid_mask);
1448
1449 static ssize_t sync_freq_show(struct device *dev,
1450 struct device_attribute *attr, char *buf)
1451 {
1452 unsigned long val;
1453 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1454
1455 val = drvdata->sync_freq;
1456 return sprintf(buf, "%#lx\n", val);
1457 }
1458
1459 static ssize_t sync_freq_store(struct device *dev,
1460 struct device_attribute *attr,
1461 const char *buf, size_t size)
1462 {
1463 int ret;
1464 unsigned long val;
1465 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1466
1467 ret = kstrtoul(buf, 16, &val);
1468 if (ret)
1469 return ret;
1470
1471 drvdata->sync_freq = val & ETM_SYNC_MASK;
1472 return size;
1473 }
1474 static DEVICE_ATTR_RW(sync_freq);
1475
1476 static ssize_t timestamp_event_show(struct device *dev,
1477 struct device_attribute *attr, char *buf)
1478 {
1479 unsigned long val;
1480 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1481
1482 val = drvdata->timestamp_event;
1483 return sprintf(buf, "%#lx\n", val);
1484 }
1485
1486 static ssize_t timestamp_event_store(struct device *dev,
1487 struct device_attribute *attr,
1488 const char *buf, size_t size)
1489 {
1490 int ret;
1491 unsigned long val;
1492 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1493
1494 ret = kstrtoul(buf, 16, &val);
1495 if (ret)
1496 return ret;
1497
1498 drvdata->timestamp_event = val & ETM_EVENT_MASK;
1499 return size;
1500 }
1501 static DEVICE_ATTR_RW(timestamp_event);
1502
1503 static ssize_t cpu_show(struct device *dev,
1504 struct device_attribute *attr, char *buf)
1505 {
1506 int val;
1507 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1508
1509 val = drvdata->cpu;
1510 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1511
1512 }
1513 static DEVICE_ATTR_RO(cpu);
1514
/*
 * sysfs: trace ID emitted by this source.
 *
 * When disabled, report the cached value; when enabled, read it back from
 * ETMTRACEIDR, which requires powering up the device and unlocking the
 * CoreSight register file around the access, under the spinlock with
 * interrupts disabled.
 */
static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->traceid;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

/* sysfs: set the trace ID; only the bits in ETM_TRACEID_MASK are kept. */
static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);
1555
/* Configuration attributes exposed in the device's sysfs directory. */
static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};
1593
/*
 * Generate a read-only sysfs show function that dumps a single management
 * register with a raw readl_relaxed() of drvdata->base + offset.
 */
#define coresight_simple_func(name, offset)                             \
static ssize_t name##_show(struct device *_dev,                         \
			   struct device_attribute *attr, char *buf)    \
{                                                                       \
	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
			 readl_relaxed(drvdata->base + offset));        \
}                                                                       \
DEVICE_ATTR_RO(name)

coresight_simple_func(etmccr, ETMCCR);
coresight_simple_func(etmccer, ETMCCER);
coresight_simple_func(etmscr, ETMSCR);
coresight_simple_func(etmidr, ETMIDR);
coresight_simple_func(etmcr, ETMCR);
coresight_simple_func(etmtraceidr, ETMTRACEIDR);
coresight_simple_func(etmteevr, ETMTEEVR);
/* NOTE(review): attribute is named etmtssvr but reads ETMTSSCR — confirm
 * the mismatch is intentional before "fixing"; renaming would change the
 * sysfs ABI. */
coresight_simple_func(etmtssvr, ETMTSSCR);
coresight_simple_func(etmtecr1, ETMTECR1);
coresight_simple_func(etmtecr2, ETMTECR2);
1614
/* Raw management registers, grouped under the "mgmt" sysfs subdirectory. */
static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};


static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

/* Handed to coresight_register() via desc->groups in etm_probe(). */
static const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};
1644
/*
 * CPU hotplug callback.
 *
 * CPU_STARTING (on the incoming CPU, pre-online): OS-unlock the ETM once
 * and re-program the hardware if tracing was enabled before the CPU went
 * down.  CPU_ONLINE: honour boot_enable for a CPU that came up after
 * probe (sticky_enable guards against doing this twice).  CPU_DYING:
 * quiesce the hardware so the CPU can be taken offline.
 */
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* No ETM was probed for this CPU. */
	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		if (etmdrvdata[cpu]->enable)
			etm_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm_cpu_notifier = {
	.notifier_call = etm_cpu_callback,
};
1686
1687 static bool etm_arch_supported(u8 arch)
1688 {
1689 switch (arch) {
1690 case ETM_ARCH_V3_3:
1691 break;
1692 case ETM_ARCH_V3_5:
1693 break;
1694 case PFT_ARCH_V1_0:
1695 break;
1696 case PFT_ARCH_V1_1:
1697 break;
1698 default:
1699 return false;
1700 }
1701 return true;
1702 }
1703
/*
 * Discover the capabilities of this ETM/PTM.  Runs on the CPU the tracer
 * is affined to (via smp_call_function_single() from etm_probe()), since
 * CP14 accesses are per-CPU.  Powers the unit up just long enough to read
 * the ID and configuration-code registers, then powers it back down.
 */
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear power down bit since when this bit is set writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set prog bit. It will be set from reset but this is included to
	 * ensure it is set
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	/* ETMCCR[3:0] counts address comparator *pairs*, hence the * 2. */
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	/* Done probing - power the unit back down. */
	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}
1745
/*
 * Establish a sane default configuration: a tentative trace ID, cycle
 * accurate tracing with timestamps, and - when at least one address
 * comparator pair exists - an address range filter covering the kernel
 * text section (_stext.._etext).
 */
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
	/*
	 * A trace ID of value 0 is invalid, so let's start at some
	 * random value that fits in 7 bits and will be just as good.
	 */
	static int etm3x_traceid = 0x10;

	u32 flags = (1 << 0 |	/* instruction execute*/
		     3 << 3 |	/* ARM instruction */
		     0 << 5 |	/* No data value comparison */
		     0 << 7 |	/* No exact mach */
		     0 << 8 |	/* Ignore context ID */
		     0 << 10);	/* Security ignored */

	/*
	 * Initial configuration only - guarantees sources handled by
	 * this driver have a unique ID at startup time but not between
	 * all other types of sources. For that we lean on the core
	 * framework.
	 */
	drvdata->traceid = etm3x_traceid++;
	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
	if (drvdata->nr_addr_cmp >= 2) {
		drvdata->addr_val[0] = (u32) _stext;
		drvdata->addr_val[1] = (u32) _etext;
		drvdata->addr_acctype[0] = flags;
		drvdata->addr_acctype[1] = flags;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	etm_set_default(drvdata);
}
1781
/*
 * AMBA probe: map the device, discover its capabilities on the affined
 * CPU, register the first-probe CPU hotplug notifier, and register the
 * device with the CoreSight core.
 *
 * Returns 0 on success or a negative errno.  Memory is devm-managed;
 * the only explicit unwinding needed is the etm_count/notifier pairing.
 */
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		/* "arm,cp14" selects CP14 register access over MMIO. */
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = pdata ? pdata->cpu : 0;

	/*
	 * Hold CPUs online so the affined CPU cannot disappear between the
	 * cross-calls below and the notifier registration.
	 */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* Capability discovery must run on the CPU that owns this tracer. */
	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data,  drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* Register the hotplug notifier once, on the first probe. */
	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);

	put_online_cpus();

	if (etm_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);
	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	/* Drop the refcount taken above; last one out removes the notifier. */
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}
1879
/*
 * AMBA remove: unregister from the CoreSight core and, if this was the
 * last ETM/PTM instance, remove the CPU hotplug notifier.
 */
static int etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);

	return 0;
}
1890
#ifdef CONFIG_PM
/* Runtime PM: gate the (optional) trace bus clock while idle. */
static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	/* atclk is optional; IS_ERR means it was never provided. */
	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

/* Runtime PM: re-enable the trace bus clock before the device is used. */
static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
1916
/* AMBA peripheral IDs of the ETM/PTM variants this driver binds to. */
static struct amba_id etm_ids[] = {
	{	/* ETM 3.3 */
		.id	= 0x0003b921,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.3",
	},
	{	/* ETM 3.5 */
		.id	= 0x0003b956,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.5",
	},
	{	/* PTM 1.0 */
		.id	= 0x0003b950,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.0",
	},
	{	/* PTM 1.1 */
		.id	= 0x0003b95f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{	/* PTM 1.1 Qualcomm */
		.id	= 0x0003006f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{ 0, 0},
};
1945
/* AMBA bus glue: binds etm_probe/etm_remove to the IDs above. */
static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
		.pm	= &etm_dev_pm_ops,
	},
	.probe		= etm_probe,
	.remove		= etm_remove,
	.id_table	= etm_ids,
};

module_amba_driver(etm_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
This page took 0.091346 seconds and 5 git commands to generate.