drivers/hwtracing/coresight/coresight-etm4x.c
1/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/fs.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/smp.h>
24#include <linux/sysfs.h>
25#include <linux/stat.h>
26#include <linux/clk.h>
27#include <linux/cpu.h>
28#include <linux/coresight.h>
29#include <linux/pm_wakeup.h>
30#include <linux/amba/bus.h>
31#include <linux/seq_file.h>
32#include <linux/uaccess.h>
33#include <linux/pm_runtime.h>
 34#include <linux/perf_event.h>
35#include <asm/sections.h>
36
37#include "coresight-etm4x.h"
38
39static int boot_enable;
40module_param_named(boot_enable, boot_enable, int, S_IRUGO);
41
 42/* The number of ETMv4 instances currently registered */
43static int etm4_count;
44static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
45
46static void etm4_os_unlock(void *info)
47{
48 struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
49
50 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
52 isb();
53}
54
55static bool etm4_arch_supported(u8 arch)
56{
57 switch (arch) {
58 case ETM_ARCH_V4:
59 break;
60 default:
61 return false;
62 }
63 return true;
64}
65
66static int etm4_cpu_id(struct coresight_device *csdev)
67{
68 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
69
70 return drvdata->cpu;
71}
72
73static int etm4_trace_id(struct coresight_device *csdev)
74{
75 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
76 unsigned long flags;
77 int trace_id = -1;
78
79 if (!drvdata->enable)
80 return drvdata->trcid;
81
82 spin_lock_irqsave(&drvdata->spinlock, flags);
83
84 CS_UNLOCK(drvdata->base);
85 trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
86 trace_id &= ETM_TRACEID_MASK;
87 CS_LOCK(drvdata->base);
88
89 spin_unlock_irqrestore(&drvdata->spinlock, flags);
90
91 return trace_id;
92}
93
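/*
 * etm4_enable_hw - program the saved configuration into the trace unit
 * registers and set TRCPRGCTLR.EN.  Always invoked through
 * smp_call_function_single() so the MMIO accesses execute on the CPU
 * that owns this ETM.
 */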
94static void etm4_enable_hw(void *info)
95{
96 int i;
97 struct etmv4_drvdata *drvdata = info;
98
99 CS_UNLOCK(drvdata->base);
100
101 etm4_os_unlock(drvdata);
102
103 /* Disable the trace unit before programming trace registers */
104 writel_relaxed(0, drvdata->base + TRCPRGCTLR);
105
106 /* wait for TRCSTATR.IDLE to go up */
107 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
108 dev_err(drvdata->dev,
109 "timeout observed when probing at offset %#x\n",
110 TRCSTATR);
111
112 writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
113 writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
114 /* nothing specific implemented */
115 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
116 writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
117 writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
118 writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
119 writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
120 writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
121 writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
122 writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
123 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
124 writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
125 writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
126 writel_relaxed(drvdata->vissctlr,
127 drvdata->base + TRCVISSCTLR);
128 writel_relaxed(drvdata->vipcssctlr,
129 drvdata->base + TRCVIPCSSCTLR);
130 for (i = 0; i < drvdata->nrseqstate - 1; i++)
131 writel_relaxed(drvdata->seq_ctrl[i],
132 drvdata->base + TRCSEQEVRn(i));
133 writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
134 writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
135 writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
136 for (i = 0; i < drvdata->nr_cntr; i++) {
137 writel_relaxed(drvdata->cntrldvr[i],
138 drvdata->base + TRCCNTRLDVRn(i));
139 writel_relaxed(drvdata->cntr_ctrl[i],
140 drvdata->base + TRCCNTCTLRn(i));
141 writel_relaxed(drvdata->cntr_val[i],
142 drvdata->base + TRCCNTVRn(i));
143 }
144
145 /* Resource selector pair 0 is always implemented and reserved */
146 for (i = 2; i < drvdata->nr_resource * 2; i++)
147 writel_relaxed(drvdata->res_ctrl[i],
148 drvdata->base + TRCRSCTLRn(i));
149
150 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
151 writel_relaxed(drvdata->ss_ctrl[i],
152 drvdata->base + TRCSSCCRn(i));
153 writel_relaxed(drvdata->ss_status[i],
154 drvdata->base + TRCSSCSRn(i));
155 writel_relaxed(drvdata->ss_pe_cmp[i],
156 drvdata->base + TRCSSPCICRn(i));
157 }
158 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
159 writeq_relaxed(drvdata->addr_val[i],
160 drvdata->base + TRCACVRn(i));
161 writeq_relaxed(drvdata->addr_acc[i],
162 drvdata->base + TRCACATRn(i));
163 }
164 for (i = 0; i < drvdata->numcidc; i++)
 165 writeq_relaxed(drvdata->ctxid_pid[i],
166 drvdata->base + TRCCIDCVRn(i));
167 writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
168 writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
169
170 for (i = 0; i < drvdata->numvmidc; i++)
171 writeq_relaxed(drvdata->vmid_val[i],
172 drvdata->base + TRCVMIDCVRn(i));
173 writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
174 writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
175
176 /* Enable the trace unit */
177 writel_relaxed(1, drvdata->base + TRCPRGCTLR);
178
179 /* wait for TRCSTATR.IDLE to go back down to '0' */
180 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
181 dev_err(drvdata->dev,
182 "timeout observed when probing at offset %#x\n",
183 TRCSTATR);
184
185 CS_LOCK(drvdata->base);
186
187 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
188}
189
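/*
 * etm4_enable - turn on tracing for this source.  The register
 * programming is pushed to the owning CPU via a cross call so the
 * trace unit is only touched while that CPU is powered.
 */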
190static int etm4_enable(struct coresight_device *csdev,
191 struct perf_event_attr *attr, u32 mode)
192{
193 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
194 int ret;
195
196 spin_lock(&drvdata->spinlock);
197
198 /*
199 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
200 * ensures that register writes occur when cpu is powered.
201 */
202 ret = smp_call_function_single(drvdata->cpu,
203 etm4_enable_hw, drvdata, 1);
204 if (ret)
205 goto err;
206 drvdata->enable = true;
207 drvdata->sticky_enable = true;
208
209 spin_unlock(&drvdata->spinlock);
210
211 dev_info(drvdata->dev, "ETM tracing enabled\n");
212 return 0;
213err:
214 spin_unlock(&drvdata->spinlock);
215 return ret;
216}
217
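/*
 * etm4_disable_hw - clear TRCPRGCTLR.EN to stop the trace unit.  The
 * barriers make sure all outstanding accesses complete before the
 * enable bit is cleared.
 */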
218static void etm4_disable_hw(void *info)
219{
220 u32 control;
221 struct etmv4_drvdata *drvdata = info;
222
223 CS_UNLOCK(drvdata->base);
224
225 control = readl_relaxed(drvdata->base + TRCPRGCTLR);
226
227 /* EN, bit[0] Trace unit enable bit */
228 control &= ~0x1;
229
230 /* make sure everything completes before disabling */
231 mb();
232 isb();
233 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
234
235 CS_LOCK(drvdata->base);
236
237 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
238}
239
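/*
 * etm4_disable - counterpart of etm4_enable().  See the comment below
 * on why the CPU hotplug lock is held across the cross call.
 */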
240static void etm4_disable(struct coresight_device *csdev)
241{
242 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
243
244 /*
245 * Taking hotplug lock here protects from clocks getting disabled
246 * with tracing being left on (crash scenario) if user disable occurs
247 * after cpu online mask indicates the cpu is offline but before the
248 * DYING hotplug callback is serviced by the ETM driver.
249 */
250 get_online_cpus();
251 spin_lock(&drvdata->spinlock);
252
253 /*
254 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
255 * ensures that register writes occur when cpu is powered.
256 */
257 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
258 drvdata->enable = false;
259
260 spin_unlock(&drvdata->spinlock);
261 put_online_cpus();
262
263 dev_info(drvdata->dev, "ETM tracing disabled\n");
264}
265
266static const struct coresight_ops_source etm4_source_ops = {
 267 .cpu_id = etm4_cpu_id,
268 .trace_id = etm4_trace_id,
269 .enable = etm4_enable,
270 .disable = etm4_disable,
271};
272
273static const struct coresight_ops etm4_cs_ops = {
274 .source_ops = &etm4_source_ops,
275};
276
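/*
 * etm4_set_mode_exclude - for an instruction address range comparator
 * pair, set either the exclude or the include bit in the ViewInst
 * include/exclude control value (TRCVIIECTLR image) held in drvdata.
 */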
277static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
278{
279 u8 idx = drvdata->addr_idx;
280
281 /*
282 * TRCACATRn.TYPE bit[1:0]: type of comparison
283 * the trace unit performs
284 */
285 if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
286 if (idx % 2 != 0)
287 return -EINVAL;
288
289 /*
290 * We are performing instruction address comparison. Set the
291 * relevant bit of ViewInst Include/Exclude Control register
292 * for corresponding address comparator pair.
293 */
294 if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
295 drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
296 return -EINVAL;
297
298 if (exclude == true) {
299 /*
300 * Set exclude bit and unset the include bit
301 * corresponding to comparator pair
302 */
303 drvdata->viiectlr |= BIT(idx / 2 + 16);
304 drvdata->viiectlr &= ~BIT(idx / 2);
305 } else {
306 /*
307 * Set include bit and unset exclude bit
308 * corresponding to comparator pair
309 */
310 drvdata->viiectlr |= BIT(idx / 2);
311 drvdata->viiectlr &= ~BIT(idx / 2 + 16);
312 }
313 }
314 return 0;
315}
316
317static ssize_t nr_pe_cmp_show(struct device *dev,
318 struct device_attribute *attr,
319 char *buf)
320{
321 unsigned long val;
322 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
323
324 val = drvdata->nr_pe_cmp;
325 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
326}
327static DEVICE_ATTR_RO(nr_pe_cmp);
328
329static ssize_t nr_addr_cmp_show(struct device *dev,
330 struct device_attribute *attr,
331 char *buf)
332{
333 unsigned long val;
334 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
335
336 val = drvdata->nr_addr_cmp;
337 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
338}
339static DEVICE_ATTR_RO(nr_addr_cmp);
340
341static ssize_t nr_cntr_show(struct device *dev,
342 struct device_attribute *attr,
343 char *buf)
344{
345 unsigned long val;
346 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
347
348 val = drvdata->nr_cntr;
349 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
350}
351static DEVICE_ATTR_RO(nr_cntr);
352
353static ssize_t nr_ext_inp_show(struct device *dev,
354 struct device_attribute *attr,
355 char *buf)
356{
357 unsigned long val;
358 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
359
360 val = drvdata->nr_ext_inp;
361 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
362}
363static DEVICE_ATTR_RO(nr_ext_inp);
364
365static ssize_t numcidc_show(struct device *dev,
366 struct device_attribute *attr,
367 char *buf)
368{
369 unsigned long val;
370 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
371
372 val = drvdata->numcidc;
373 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
374}
375static DEVICE_ATTR_RO(numcidc);
376
377static ssize_t numvmidc_show(struct device *dev,
378 struct device_attribute *attr,
379 char *buf)
380{
381 unsigned long val;
382 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
383
384 val = drvdata->numvmidc;
385 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
386}
387static DEVICE_ATTR_RO(numvmidc);
388
389static ssize_t nrseqstate_show(struct device *dev,
390 struct device_attribute *attr,
391 char *buf)
392{
393 unsigned long val;
394 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
395
396 val = drvdata->nrseqstate;
397 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
398}
399static DEVICE_ATTR_RO(nrseqstate);
400
401static ssize_t nr_resource_show(struct device *dev,
402 struct device_attribute *attr,
403 char *buf)
404{
405 unsigned long val;
406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
407
408 val = drvdata->nr_resource;
409 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
410}
411static DEVICE_ATTR_RO(nr_resource);
412
413static ssize_t nr_ss_cmp_show(struct device *dev,
414 struct device_attribute *attr,
415 char *buf)
416{
417 unsigned long val;
418 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
419
420 val = drvdata->nr_ss_cmp;
421 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
422}
423static DEVICE_ATTR_RO(nr_ss_cmp);
424
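/*
 * Writing a non-zero value to 'reset' returns the sysfs configuration
 * to its defaults: data tracing and events off, ViewInst tracing
 * everything with the start/stop logic in the started state, and all
 * comparator, counter and sequencer state cleared.
 */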
425static ssize_t reset_store(struct device *dev,
426 struct device_attribute *attr,
427 const char *buf, size_t size)
428{
429 int i;
430 unsigned long val;
431 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
432
433 if (kstrtoul(buf, 16, &val))
434 return -EINVAL;
435
436 spin_lock(&drvdata->spinlock);
437 if (val)
438 drvdata->mode = 0x0;
439
440 /* Disable data tracing: do not trace load and store data transfers */
441 drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
442 drvdata->cfg &= ~(BIT(1) | BIT(2));
443
444 /* Disable data value and data address tracing */
445 drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
446 ETM_MODE_DATA_TRACE_VAL);
447 drvdata->cfg &= ~(BIT(16) | BIT(17));
448
449 /* Disable all events tracing */
450 drvdata->eventctrl0 = 0x0;
451 drvdata->eventctrl1 = 0x0;
452
453 /* Disable timestamp event */
454 drvdata->ts_ctrl = 0x0;
455
456 /* Disable stalling */
457 drvdata->stall_ctrl = 0x0;
458
 459 /* Reset trace synchronization period to 2^8 = 256 bytes */
460 if (drvdata->syncpr == false)
461 drvdata->syncfreq = 0x8;
462
463 /*
464 * Enable ViewInst to trace everything with start-stop logic in
465 * started state. ARM recommends start-stop logic is set before
466 * each trace run.
467 */
468 drvdata->vinst_ctrl |= BIT(0);
 469 if (drvdata->nr_addr_cmp) {
470 drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
471 /* SSSTATUS, bit[9] */
472 drvdata->vinst_ctrl |= BIT(9);
473 }
474
475 /* No address range filtering for ViewInst */
476 drvdata->viiectlr = 0x0;
477
478 /* No start-stop filtering for ViewInst */
479 drvdata->vissctlr = 0x0;
480
481 /* Disable seq events */
482 for (i = 0; i < drvdata->nrseqstate-1; i++)
483 drvdata->seq_ctrl[i] = 0x0;
484 drvdata->seq_rst = 0x0;
485 drvdata->seq_state = 0x0;
486
487 /* Disable external input events */
488 drvdata->ext_inp = 0x0;
489
490 drvdata->cntr_idx = 0x0;
491 for (i = 0; i < drvdata->nr_cntr; i++) {
492 drvdata->cntrldvr[i] = 0x0;
493 drvdata->cntr_ctrl[i] = 0x0;
494 drvdata->cntr_val[i] = 0x0;
495 }
496
497 /* Resource selector pair 0 is always implemented and reserved */
498 drvdata->res_idx = 0x2;
499 for (i = 2; i < drvdata->nr_resource * 2; i++)
500 drvdata->res_ctrl[i] = 0x0;
501
502 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
503 drvdata->ss_ctrl[i] = 0x0;
504 drvdata->ss_pe_cmp[i] = 0x0;
505 }
506
507 drvdata->addr_idx = 0x0;
508 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
509 drvdata->addr_val[i] = 0x0;
510 drvdata->addr_acc[i] = 0x0;
511 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
512 }
513
514 drvdata->ctxid_idx = 0x0;
 515 for (i = 0; i < drvdata->numcidc; i++) {
 516 drvdata->ctxid_pid[i] = 0x0;
517 drvdata->ctxid_vpid[i] = 0x0;
518 }
519
520 drvdata->ctxid_mask0 = 0x0;
521 drvdata->ctxid_mask1 = 0x0;
522
523 drvdata->vmid_idx = 0x0;
524 for (i = 0; i < drvdata->numvmidc; i++)
525 drvdata->vmid_val[i] = 0x0;
526 drvdata->vmid_mask0 = 0x0;
527 drvdata->vmid_mask1 = 0x0;
528
529 drvdata->trcid = drvdata->cpu + 1;
530 spin_unlock(&drvdata->spinlock);
531 return size;
532}
533static DEVICE_ATTR_WO(reset);
534
535static ssize_t mode_show(struct device *dev,
536 struct device_attribute *attr,
537 char *buf)
538{
539 unsigned long val;
540 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
541
542 val = drvdata->mode;
543 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
544}
545
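/*
 * mode_store - decode the user supplied mode bitmask into the shadow
 * values for TRCCONFIGR, TRCEVENTCTL1R, TRCSTALLCTLR and TRCVICTLR.
 * Features the implementation does not advertise (branch broadcast,
 * cycle counting, return stack, ...) are left disabled.
 */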
546static ssize_t mode_store(struct device *dev,
547 struct device_attribute *attr,
548 const char *buf, size_t size)
549{
550 unsigned long val, mode;
551 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
552
553 if (kstrtoul(buf, 16, &val))
554 return -EINVAL;
555
556 spin_lock(&drvdata->spinlock);
557 drvdata->mode = val & ETMv4_MODE_ALL;
558
559 if (drvdata->mode & ETM_MODE_EXCLUDE)
560 etm4_set_mode_exclude(drvdata, true);
561 else
562 etm4_set_mode_exclude(drvdata, false);
563
564 if (drvdata->instrp0 == true) {
565 /* start by clearing instruction P0 field */
566 drvdata->cfg &= ~(BIT(1) | BIT(2));
567 if (drvdata->mode & ETM_MODE_LOAD)
568 /* 0b01 Trace load instructions as P0 instructions */
569 drvdata->cfg |= BIT(1);
570 if (drvdata->mode & ETM_MODE_STORE)
571 /* 0b10 Trace store instructions as P0 instructions */
572 drvdata->cfg |= BIT(2);
573 if (drvdata->mode & ETM_MODE_LOAD_STORE)
574 /*
575 * 0b11 Trace load and store instructions
576 * as P0 instructions
577 */
578 drvdata->cfg |= BIT(1) | BIT(2);
579 }
580
581 /* bit[3], Branch broadcast mode */
582 if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
583 drvdata->cfg |= BIT(3);
584 else
585 drvdata->cfg &= ~BIT(3);
586
587 /* bit[4], Cycle counting instruction trace bit */
588 if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
589 (drvdata->trccci == true))
590 drvdata->cfg |= BIT(4);
591 else
592 drvdata->cfg &= ~BIT(4);
593
594 /* bit[6], Context ID tracing bit */
595 if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
596 drvdata->cfg |= BIT(6);
597 else
598 drvdata->cfg &= ~BIT(6);
599
600 if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
601 drvdata->cfg |= BIT(7);
602 else
603 drvdata->cfg &= ~BIT(7);
604
605 /* bits[10:8], Conditional instruction tracing bit */
606 mode = ETM_MODE_COND(drvdata->mode);
607 if (drvdata->trccond == true) {
608 drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
609 drvdata->cfg |= mode << 8;
610 }
611
612 /* bit[11], Global timestamp tracing bit */
613 if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
614 drvdata->cfg |= BIT(11);
615 else
616 drvdata->cfg &= ~BIT(11);
617
618 /* bit[12], Return stack enable bit */
619 if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
620 (drvdata->retstack == true))
621 drvdata->cfg |= BIT(12);
622 else
623 drvdata->cfg &= ~BIT(12);
624
625 /* bits[14:13], Q element enable field */
626 mode = ETM_MODE_QELEM(drvdata->mode);
627 /* start by clearing QE bits */
628 drvdata->cfg &= ~(BIT(13) | BIT(14));
629 /* if supported, Q elements with instruction counts are enabled */
630 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
631 drvdata->cfg |= BIT(13);
632 /*
633 * if supported, Q elements with and without instruction
634 * counts are enabled
635 */
636 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
637 drvdata->cfg |= BIT(14);
638
639 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
640 if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
641 (drvdata->atbtrig == true))
642 drvdata->eventctrl1 |= BIT(11);
643 else
644 drvdata->eventctrl1 &= ~BIT(11);
645
646 /* bit[12], Low-power state behavior override bit */
647 if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
648 (drvdata->lpoverride == true))
649 drvdata->eventctrl1 |= BIT(12);
650 else
651 drvdata->eventctrl1 &= ~BIT(12);
652
653 /* bit[8], Instruction stall bit */
654 if (drvdata->mode & ETM_MODE_ISTALL_EN)
655 drvdata->stall_ctrl |= BIT(8);
656 else
657 drvdata->stall_ctrl &= ~BIT(8);
658
659 /* bit[10], Prioritize instruction trace bit */
660 if (drvdata->mode & ETM_MODE_INSTPRIO)
661 drvdata->stall_ctrl |= BIT(10);
662 else
663 drvdata->stall_ctrl &= ~BIT(10);
664
665 /* bit[13], Trace overflow prevention bit */
666 if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
667 (drvdata->nooverflow == true))
668 drvdata->stall_ctrl |= BIT(13);
669 else
670 drvdata->stall_ctrl &= ~BIT(13);
671
672 /* bit[9] Start/stop logic control bit */
673 if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
674 drvdata->vinst_ctrl |= BIT(9);
675 else
676 drvdata->vinst_ctrl &= ~BIT(9);
677
678 /* bit[10], Whether a trace unit must trace a Reset exception */
679 if (drvdata->mode & ETM_MODE_TRACE_RESET)
680 drvdata->vinst_ctrl |= BIT(10);
681 else
682 drvdata->vinst_ctrl &= ~BIT(10);
683
684 /* bit[11], Whether a trace unit must trace a system error exception */
685 if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
686 (drvdata->trc_error == true))
687 drvdata->vinst_ctrl |= BIT(11);
688 else
689 drvdata->vinst_ctrl &= ~BIT(11);
690
691 spin_unlock(&drvdata->spinlock);
692 return size;
693}
694static DEVICE_ATTR_RW(mode);
695
696static ssize_t pe_show(struct device *dev,
697 struct device_attribute *attr,
698 char *buf)
699{
700 unsigned long val;
701 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
702
703 val = drvdata->pe_sel;
704 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
705}
706
707static ssize_t pe_store(struct device *dev,
708 struct device_attribute *attr,
709 const char *buf, size_t size)
710{
711 unsigned long val;
712 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
713
714 if (kstrtoul(buf, 16, &val))
715 return -EINVAL;
716
717 spin_lock(&drvdata->spinlock);
718 if (val > drvdata->nr_pe) {
719 spin_unlock(&drvdata->spinlock);
720 return -EINVAL;
721 }
722
723 drvdata->pe_sel = val;
724 spin_unlock(&drvdata->spinlock);
725 return size;
726}
727static DEVICE_ATTR_RW(pe);
728
729static ssize_t event_show(struct device *dev,
730 struct device_attribute *attr,
731 char *buf)
732{
733 unsigned long val;
734 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
735
736 val = drvdata->eventctrl0;
737 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
738}
739
740static ssize_t event_store(struct device *dev,
741 struct device_attribute *attr,
742 const char *buf, size_t size)
743{
744 unsigned long val;
745 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
746
747 if (kstrtoul(buf, 16, &val))
748 return -EINVAL;
749
750 spin_lock(&drvdata->spinlock);
751 switch (drvdata->nr_event) {
752 case 0x0:
753 /* EVENT0, bits[7:0] */
754 drvdata->eventctrl0 = val & 0xFF;
755 break;
756 case 0x1:
757 /* EVENT1, bits[15:8] */
758 drvdata->eventctrl0 = val & 0xFFFF;
759 break;
760 case 0x2:
761 /* EVENT2, bits[23:16] */
762 drvdata->eventctrl0 = val & 0xFFFFFF;
763 break;
764 case 0x3:
765 /* EVENT3, bits[31:24] */
766 drvdata->eventctrl0 = val;
767 break;
768 default:
769 break;
770 }
771 spin_unlock(&drvdata->spinlock);
772 return size;
773}
774static DEVICE_ATTR_RW(event);
775
776static ssize_t event_instren_show(struct device *dev,
777 struct device_attribute *attr,
778 char *buf)
779{
780 unsigned long val;
781 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
782
783 val = BMVAL(drvdata->eventctrl1, 0, 3);
784 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
785}
786
787static ssize_t event_instren_store(struct device *dev,
788 struct device_attribute *attr,
789 const char *buf, size_t size)
790{
791 unsigned long val;
792 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
793
794 if (kstrtoul(buf, 16, &val))
795 return -EINVAL;
796
797 spin_lock(&drvdata->spinlock);
798 /* start by clearing all instruction event enable bits */
799 drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
800 switch (drvdata->nr_event) {
801 case 0x0:
802 /* generate Event element for event 1 */
803 drvdata->eventctrl1 |= val & BIT(1);
804 break;
805 case 0x1:
806 /* generate Event element for event 1 and 2 */
807 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
808 break;
809 case 0x2:
810 /* generate Event element for event 1, 2 and 3 */
811 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
812 break;
813 case 0x3:
814 /* generate Event element for all 4 events */
815 drvdata->eventctrl1 |= val & 0xF;
816 break;
817 default:
818 break;
819 }
820 spin_unlock(&drvdata->spinlock);
821 return size;
822}
823static DEVICE_ATTR_RW(event_instren);
824
825static ssize_t event_ts_show(struct device *dev,
826 struct device_attribute *attr,
827 char *buf)
828{
829 unsigned long val;
830 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
831
832 val = drvdata->ts_ctrl;
833 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
834}
835
836static ssize_t event_ts_store(struct device *dev,
837 struct device_attribute *attr,
838 const char *buf, size_t size)
839{
840 unsigned long val;
841 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
842
843 if (kstrtoul(buf, 16, &val))
844 return -EINVAL;
845 if (!drvdata->ts_size)
846 return -EINVAL;
847
848 drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
849 return size;
850}
851static DEVICE_ATTR_RW(event_ts);
852
853static ssize_t syncfreq_show(struct device *dev,
854 struct device_attribute *attr,
855 char *buf)
856{
857 unsigned long val;
858 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
859
860 val = drvdata->syncfreq;
861 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
862}
863
864static ssize_t syncfreq_store(struct device *dev,
865 struct device_attribute *attr,
866 const char *buf, size_t size)
867{
868 unsigned long val;
869 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
870
871 if (kstrtoul(buf, 16, &val))
872 return -EINVAL;
873 if (drvdata->syncpr == true)
874 return -EINVAL;
875
876 drvdata->syncfreq = val & ETMv4_SYNC_MASK;
877 return size;
878}
879static DEVICE_ATTR_RW(syncfreq);
880
881static ssize_t cyc_threshold_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 unsigned long val;
886 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
887
888 val = drvdata->ccctlr;
889 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
890}
891
892static ssize_t cyc_threshold_store(struct device *dev,
893 struct device_attribute *attr,
894 const char *buf, size_t size)
895{
896 unsigned long val;
897 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
898
899 if (kstrtoul(buf, 16, &val))
900 return -EINVAL;
901 if (val < drvdata->ccitmin)
902 return -EINVAL;
903
904 drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
905 return size;
906}
907static DEVICE_ATTR_RW(cyc_threshold);
908
909static ssize_t bb_ctrl_show(struct device *dev,
910 struct device_attribute *attr,
911 char *buf)
912{
913 unsigned long val;
914 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915
916 val = drvdata->bb_ctrl;
917 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
918}
919
920static ssize_t bb_ctrl_store(struct device *dev,
921 struct device_attribute *attr,
922 const char *buf, size_t size)
923{
924 unsigned long val;
925 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
926
927 if (kstrtoul(buf, 16, &val))
928 return -EINVAL;
929 if (drvdata->trcbb == false)
930 return -EINVAL;
931 if (!drvdata->nr_addr_cmp)
932 return -EINVAL;
933 /*
 934 * Bits[7:0] select which address range comparator is used for
935 * branch broadcast control.
936 */
937 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
938 return -EINVAL;
939
940 drvdata->bb_ctrl = val;
941 return size;
942}
943static DEVICE_ATTR_RW(bb_ctrl);
944
945static ssize_t event_vinst_show(struct device *dev,
946 struct device_attribute *attr,
947 char *buf)
948{
949 unsigned long val;
950 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
951
952 val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
953 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
954}
955
956static ssize_t event_vinst_store(struct device *dev,
957 struct device_attribute *attr,
958 const char *buf, size_t size)
959{
960 unsigned long val;
961 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
962
963 if (kstrtoul(buf, 16, &val))
964 return -EINVAL;
965
966 spin_lock(&drvdata->spinlock);
967 val &= ETMv4_EVENT_MASK;
968 drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
969 drvdata->vinst_ctrl |= val;
970 spin_unlock(&drvdata->spinlock);
971 return size;
972}
973static DEVICE_ATTR_RW(event_vinst);
974
975static ssize_t s_exlevel_vinst_show(struct device *dev,
976 struct device_attribute *attr,
977 char *buf)
978{
979 unsigned long val;
980 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
981
982 val = BMVAL(drvdata->vinst_ctrl, 16, 19);
983 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
984}
985
986static ssize_t s_exlevel_vinst_store(struct device *dev,
987 struct device_attribute *attr,
988 const char *buf, size_t size)
989{
990 unsigned long val;
991 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
992
993 if (kstrtoul(buf, 16, &val))
994 return -EINVAL;
995
996 spin_lock(&drvdata->spinlock);
997 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
998 drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
999 /* enable instruction tracing for corresponding exception level */
1000 val &= drvdata->s_ex_level;
1001 drvdata->vinst_ctrl |= (val << 16);
1002 spin_unlock(&drvdata->spinlock);
1003 return size;
1004}
1005static DEVICE_ATTR_RW(s_exlevel_vinst);
1006
1007static ssize_t ns_exlevel_vinst_show(struct device *dev,
1008 struct device_attribute *attr,
1009 char *buf)
1010{
1011 unsigned long val;
1012 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1013
1014 /* EXLEVEL_NS, bits[23:20] */
1015 val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1016 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1017}
1018
1019static ssize_t ns_exlevel_vinst_store(struct device *dev,
1020 struct device_attribute *attr,
1021 const char *buf, size_t size)
1022{
1023 unsigned long val;
1024 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025
1026 if (kstrtoul(buf, 16, &val))
1027 return -EINVAL;
1028
1029 spin_lock(&drvdata->spinlock);
 1030 /* clear EXLEVEL_NS bits (bit[23] is never implemented) */
1031 drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1032 /* enable instruction tracing for corresponding exception level */
1033 val &= drvdata->ns_ex_level;
1034 drvdata->vinst_ctrl |= (val << 20);
1035 spin_unlock(&drvdata->spinlock);
1036 return size;
1037}
1038static DEVICE_ATTR_RW(ns_exlevel_vinst);
1039
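/*
 * The addr_* attributes below all operate on the comparator selected
 * by addr_idx; the spinlock keeps the index stable while an attribute
 * handler dereferences it several times.
 */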
1040static ssize_t addr_idx_show(struct device *dev,
1041 struct device_attribute *attr,
1042 char *buf)
1043{
1044 unsigned long val;
1045 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1046
1047 val = drvdata->addr_idx;
1048 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1049}
1050
1051static ssize_t addr_idx_store(struct device *dev,
1052 struct device_attribute *attr,
1053 const char *buf, size_t size)
1054{
1055 unsigned long val;
1056 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1057
1058 if (kstrtoul(buf, 16, &val))
1059 return -EINVAL;
1060 if (val >= drvdata->nr_addr_cmp * 2)
1061 return -EINVAL;
1062
1063 /*
1064 * Use spinlock to ensure index doesn't change while it gets
1065 * dereferenced multiple times within a spinlock block elsewhere.
1066 */
1067 spin_lock(&drvdata->spinlock);
1068 drvdata->addr_idx = val;
1069 spin_unlock(&drvdata->spinlock);
1070 return size;
1071}
1072static DEVICE_ATTR_RW(addr_idx);
1073
1074static ssize_t addr_instdatatype_show(struct device *dev,
1075 struct device_attribute *attr,
1076 char *buf)
1077{
1078 ssize_t len;
1079 u8 val, idx;
1080 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1081
1082 spin_lock(&drvdata->spinlock);
1083 idx = drvdata->addr_idx;
1084 val = BMVAL(drvdata->addr_acc[idx], 0, 1);
1085 len = scnprintf(buf, PAGE_SIZE, "%s\n",
1086 val == ETM_INSTR_ADDR ? "instr" :
1087 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
1088 (val == ETM_DATA_STORE_ADDR ? "data_store" :
1089 "data_load_store")));
1090 spin_unlock(&drvdata->spinlock);
1091 return len;
1092}
1093
1094static ssize_t addr_instdatatype_store(struct device *dev,
1095 struct device_attribute *attr,
1096 const char *buf, size_t size)
1097{
1098 u8 idx;
1099 char str[20] = "";
1100 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1101
1102 if (strlen(buf) >= 20)
1103 return -EINVAL;
1104 if (sscanf(buf, "%s", str) != 1)
1105 return -EINVAL;
1106
1107 spin_lock(&drvdata->spinlock);
1108 idx = drvdata->addr_idx;
1109 if (!strcmp(str, "instr"))
1110 /* TYPE, bits[1:0] */
1111 drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
1112
1113 spin_unlock(&drvdata->spinlock);
1114 return size;
1115}
1116static DEVICE_ATTR_RW(addr_instdatatype);
1117
1118static ssize_t addr_single_show(struct device *dev,
1119 struct device_attribute *attr,
1120 char *buf)
1121{
1122 u8 idx;
1123 unsigned long val;
1124 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1125
1126 idx = drvdata->addr_idx;
1127 spin_lock(&drvdata->spinlock);
1128 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1129 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1130 spin_unlock(&drvdata->spinlock);
1131 return -EPERM;
1132 }
1133 val = (unsigned long)drvdata->addr_val[idx];
1134 spin_unlock(&drvdata->spinlock);
1135 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1136}
1137
1138static ssize_t addr_single_store(struct device *dev,
1139 struct device_attribute *attr,
1140 const char *buf, size_t size)
1141{
1142 u8 idx;
1143 unsigned long val;
1144 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1145
1146 if (kstrtoul(buf, 16, &val))
1147 return -EINVAL;
1148
1149 spin_lock(&drvdata->spinlock);
1150 idx = drvdata->addr_idx;
1151 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1152 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1153 spin_unlock(&drvdata->spinlock);
1154 return -EPERM;
1155 }
1156
1157 drvdata->addr_val[idx] = (u64)val;
1158 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1159 spin_unlock(&drvdata->spinlock);
1160 return size;
1161}
1162static DEVICE_ATTR_RW(addr_single);
1163
1164static ssize_t addr_range_show(struct device *dev,
1165 struct device_attribute *attr,
1166 char *buf)
1167{
1168 u8 idx;
1169 unsigned long val1, val2;
1170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1171
1172 spin_lock(&drvdata->spinlock);
1173 idx = drvdata->addr_idx;
1174 if (idx % 2 != 0) {
1175 spin_unlock(&drvdata->spinlock);
1176 return -EPERM;
1177 }
1178 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1179 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1180 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1181 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1182 spin_unlock(&drvdata->spinlock);
1183 return -EPERM;
1184 }
1185
1186 val1 = (unsigned long)drvdata->addr_val[idx];
1187 val2 = (unsigned long)drvdata->addr_val[idx + 1];
1188 spin_unlock(&drvdata->spinlock);
1189 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1190}
1191
1192static ssize_t addr_range_store(struct device *dev,
1193 struct device_attribute *attr,
1194 const char *buf, size_t size)
1195{
1196 u8 idx;
1197 unsigned long val1, val2;
1198 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199
1200 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1201 return -EINVAL;
1202 /* lower address comparator cannot have a higher address value */
1203 if (val1 > val2)
1204 return -EINVAL;
1205
1206 spin_lock(&drvdata->spinlock);
1207 idx = drvdata->addr_idx;
1208 if (idx % 2 != 0) {
1209 spin_unlock(&drvdata->spinlock);
1210 return -EPERM;
1211 }
1212
1213 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1214 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1215 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1216 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1217 spin_unlock(&drvdata->spinlock);
1218 return -EPERM;
1219 }
1220
1221 drvdata->addr_val[idx] = (u64)val1;
1222 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1223 drvdata->addr_val[idx + 1] = (u64)val2;
1224 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1225 /*
1226 * Program include or exclude control bits for vinst or vdata
1227 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1228 */
1229 if (drvdata->mode & ETM_MODE_EXCLUDE)
1230 etm4_set_mode_exclude(drvdata, true);
1231 else
1232 etm4_set_mode_exclude(drvdata, false);
1233
1234 spin_unlock(&drvdata->spinlock);
1235 return size;
1236}
1237static DEVICE_ATTR_RW(addr_range);
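/*
 * Example of driving the address range filter from user space (a
 * sketch only - the device name under /sys/bus/coresight/devices/ is
 * platform dependent):
 *
 *   cd /sys/bus/coresight/devices/<etm-device>
 *   echo 0 > addr_idx                  # select comparator pair 0/1
 *   echo 0x1000 0x2000 > addr_range    # program the pair with [0x1000, 0x2000]
 */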
1238
1239static ssize_t addr_start_show(struct device *dev,
1240 struct device_attribute *attr,
1241 char *buf)
1242{
1243 u8 idx;
1244 unsigned long val;
1245 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1246
1247 spin_lock(&drvdata->spinlock);
1248 idx = drvdata->addr_idx;
1249
1250 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1251 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1252 spin_unlock(&drvdata->spinlock);
1253 return -EPERM;
1254 }
1255
1256 val = (unsigned long)drvdata->addr_val[idx];
1257 spin_unlock(&drvdata->spinlock);
1258 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1259}
1260
1261static ssize_t addr_start_store(struct device *dev,
1262 struct device_attribute *attr,
1263 const char *buf, size_t size)
1264{
1265 u8 idx;
1266 unsigned long val;
1267 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268
1269 if (kstrtoul(buf, 16, &val))
1270 return -EINVAL;
1271
1272 spin_lock(&drvdata->spinlock);
1273 idx = drvdata->addr_idx;
1274 if (!drvdata->nr_addr_cmp) {
1275 spin_unlock(&drvdata->spinlock);
1276 return -EINVAL;
1277 }
1278 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1279 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1280 spin_unlock(&drvdata->spinlock);
1281 return -EPERM;
1282 }
1283
1284 drvdata->addr_val[idx] = (u64)val;
1285 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
1286 drvdata->vissctlr |= BIT(idx);
1287 /* SSSTATUS, bit[9] - turn on start/stop logic */
1288 drvdata->vinst_ctrl |= BIT(9);
1289 spin_unlock(&drvdata->spinlock);
1290 return size;
1291}
1292static DEVICE_ATTR_RW(addr_start);
1293
1294static ssize_t addr_stop_show(struct device *dev,
1295 struct device_attribute *attr,
1296 char *buf)
1297{
1298 u8 idx;
1299 unsigned long val;
1300 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301
1302 spin_lock(&drvdata->spinlock);
1303 idx = drvdata->addr_idx;
1304
1305 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1306 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1307 spin_unlock(&drvdata->spinlock);
1308 return -EPERM;
1309 }
1310
1311 val = (unsigned long)drvdata->addr_val[idx];
1312 spin_unlock(&drvdata->spinlock);
1313 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1314}
1315
1316static ssize_t addr_stop_store(struct device *dev,
1317 struct device_attribute *attr,
1318 const char *buf, size_t size)
1319{
1320 u8 idx;
1321 unsigned long val;
1322 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323
1324 if (kstrtoul(buf, 16, &val))
1325 return -EINVAL;
1326
1327 spin_lock(&drvdata->spinlock);
1328 idx = drvdata->addr_idx;
1329 if (!drvdata->nr_addr_cmp) {
1330 spin_unlock(&drvdata->spinlock);
1331 return -EINVAL;
1332 }
1333 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1334 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1335 spin_unlock(&drvdata->spinlock);
1336 return -EPERM;
1337 }
1338
1339 drvdata->addr_val[idx] = (u64)val;
1340 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1341 drvdata->vissctlr |= BIT(idx + 16);
1342 /* SSSTATUS, bit[9] - turn on start/stop logic */
1343 drvdata->vinst_ctrl |= BIT(9);
1344 spin_unlock(&drvdata->spinlock);
1345 return size;
1346}
1347static DEVICE_ATTR_RW(addr_stop);
1348
1349static ssize_t addr_ctxtype_show(struct device *dev,
1350 struct device_attribute *attr,
1351 char *buf)
1352{
1353 ssize_t len;
1354 u8 idx, val;
1355 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1356
1357 spin_lock(&drvdata->spinlock);
1358 idx = drvdata->addr_idx;
1359 /* CONTEXTTYPE, bits[3:2] */
1360 val = BMVAL(drvdata->addr_acc[idx], 2, 3);
1361 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1362 (val == ETM_CTX_CTXID ? "ctxid" :
1363 (val == ETM_CTX_VMID ? "vmid" : "all")));
1364 spin_unlock(&drvdata->spinlock);
1365 return len;
1366}
1367
1368static ssize_t addr_ctxtype_store(struct device *dev,
1369 struct device_attribute *attr,
1370 const char *buf, size_t size)
1371{
1372 u8 idx;
1373 char str[10] = "";
1374 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1375
1376 if (strlen(buf) >= 10)
1377 return -EINVAL;
1378 if (sscanf(buf, "%s", str) != 1)
1379 return -EINVAL;
1380
1381 spin_lock(&drvdata->spinlock);
1382 idx = drvdata->addr_idx;
1383 if (!strcmp(str, "none"))
1384 /* start by clearing context type bits */
1385 drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1386 else if (!strcmp(str, "ctxid")) {
 1387 /* 0b01 The trace unit performs a Context ID comparison */
1388 if (drvdata->numcidc) {
1389 drvdata->addr_acc[idx] |= BIT(2);
1390 drvdata->addr_acc[idx] &= ~BIT(3);
1391 }
1392 } else if (!strcmp(str, "vmid")) {
 1393 /* 0b10 The trace unit performs a VMID comparison */
1394 if (drvdata->numvmidc) {
1395 drvdata->addr_acc[idx] &= ~BIT(2);
1396 drvdata->addr_acc[idx] |= BIT(3);
1397 }
1398 } else if (!strcmp(str, "all")) {
1399 /*
 1400 * 0b11 The trace unit performs both a Context ID
 1401 * and a VMID comparison
1402 */
1403 if (drvdata->numcidc)
1404 drvdata->addr_acc[idx] |= BIT(2);
1405 if (drvdata->numvmidc)
1406 drvdata->addr_acc[idx] |= BIT(3);
1407 }
1408 spin_unlock(&drvdata->spinlock);
1409 return size;
1410}
1411static DEVICE_ATTR_RW(addr_ctxtype);
1412
1413static ssize_t addr_context_show(struct device *dev,
1414 struct device_attribute *attr,
1415 char *buf)
1416{
1417 u8 idx;
1418 unsigned long val;
1419 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1420
1421 spin_lock(&drvdata->spinlock);
1422 idx = drvdata->addr_idx;
1423 /* context ID comparator bits[6:4] */
1424 val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1425 spin_unlock(&drvdata->spinlock);
1426 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1427}
1428
1429static ssize_t addr_context_store(struct device *dev,
1430 struct device_attribute *attr,
1431 const char *buf, size_t size)
1432{
1433 u8 idx;
1434 unsigned long val;
1435 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436
1437 if (kstrtoul(buf, 16, &val))
1438 return -EINVAL;
1439 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1440 return -EINVAL;
1441 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1442 drvdata->numcidc : drvdata->numvmidc))
1443 return -EINVAL;
1444
1445 spin_lock(&drvdata->spinlock);
1446 idx = drvdata->addr_idx;
1447 /* clear context ID comparator bits[6:4] */
1448 drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1449 drvdata->addr_acc[idx] |= (val << 4);
1450 spin_unlock(&drvdata->spinlock);
1451 return size;
1452}
1453static DEVICE_ATTR_RW(addr_context);
1454
1455static ssize_t seq_idx_show(struct device *dev,
1456 struct device_attribute *attr,
1457 char *buf)
1458{
1459 unsigned long val;
1460 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461
1462 val = drvdata->seq_idx;
1463 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1464}
1465
1466static ssize_t seq_idx_store(struct device *dev,
1467 struct device_attribute *attr,
1468 const char *buf, size_t size)
1469{
1470 unsigned long val;
1471 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472
1473 if (kstrtoul(buf, 16, &val))
1474 return -EINVAL;
1475 if (val >= drvdata->nrseqstate - 1)
1476 return -EINVAL;
1477
1478 /*
1479 * Use spinlock to ensure index doesn't change while it gets
1480 * dereferenced multiple times within a spinlock block elsewhere.
1481 */
1482 spin_lock(&drvdata->spinlock);
1483 drvdata->seq_idx = val;
1484 spin_unlock(&drvdata->spinlock);
1485 return size;
1486}
1487static DEVICE_ATTR_RW(seq_idx);
1488
1489static ssize_t seq_state_show(struct device *dev,
1490 struct device_attribute *attr,
1491 char *buf)
1492{
1493 unsigned long val;
1494 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1495
1496 val = drvdata->seq_state;
1497 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1498}
1499
1500static ssize_t seq_state_store(struct device *dev,
1501 struct device_attribute *attr,
1502 const char *buf, size_t size)
1503{
1504 unsigned long val;
1505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1506
1507 if (kstrtoul(buf, 16, &val))
1508 return -EINVAL;
1509 if (val >= drvdata->nrseqstate)
1510 return -EINVAL;
1511
1512 drvdata->seq_state = val;
1513 return size;
1514}
1515static DEVICE_ATTR_RW(seq_state);
1516
1517static ssize_t seq_event_show(struct device *dev,
1518 struct device_attribute *attr,
1519 char *buf)
1520{
1521 u8 idx;
1522 unsigned long val;
1523 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1524
1525 spin_lock(&drvdata->spinlock);
1526 idx = drvdata->seq_idx;
1527 val = drvdata->seq_ctrl[idx];
1528 spin_unlock(&drvdata->spinlock);
1529 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1530}
1531
1532static ssize_t seq_event_store(struct device *dev,
1533 struct device_attribute *attr,
1534 const char *buf, size_t size)
1535{
1536 u8 idx;
1537 unsigned long val;
1538 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1539
1540 if (kstrtoul(buf, 16, &val))
1541 return -EINVAL;
1542
1543 spin_lock(&drvdata->spinlock);
1544 idx = drvdata->seq_idx;
1545 /* RST, bits[7:0] */
1546 drvdata->seq_ctrl[idx] = val & 0xFF;
1547 spin_unlock(&drvdata->spinlock);
1548 return size;
1549}
1550static DEVICE_ATTR_RW(seq_event);
1551
1552static ssize_t seq_reset_event_show(struct device *dev,
1553 struct device_attribute *attr,
1554 char *buf)
1555{
1556 unsigned long val;
1557 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1558
1559 val = drvdata->seq_rst;
1560 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1561}
1562
1563static ssize_t seq_reset_event_store(struct device *dev,
1564 struct device_attribute *attr,
1565 const char *buf, size_t size)
1566{
1567 unsigned long val;
1568 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1569
1570 if (kstrtoul(buf, 16, &val))
1571 return -EINVAL;
1572 if (!(drvdata->nrseqstate))
1573 return -EINVAL;
1574
1575 drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1576 return size;
1577}
1578static DEVICE_ATTR_RW(seq_reset_event);
1579
1580static ssize_t cntr_idx_show(struct device *dev,
1581 struct device_attribute *attr,
1582 char *buf)
1583{
1584 unsigned long val;
1585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1586
1587 val = drvdata->cntr_idx;
1588 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1589}
1590
1591static ssize_t cntr_idx_store(struct device *dev,
1592 struct device_attribute *attr,
1593 const char *buf, size_t size)
1594{
1595 unsigned long val;
1596 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1597
1598 if (kstrtoul(buf, 16, &val))
1599 return -EINVAL;
1600 if (val >= drvdata->nr_cntr)
1601 return -EINVAL;
1602
1603 /*
1604 * Use spinlock to ensure index doesn't change while it gets
1605 * dereferenced multiple times within a spinlock block elsewhere.
1606 */
1607 spin_lock(&drvdata->spinlock);
1608 drvdata->cntr_idx = val;
1609 spin_unlock(&drvdata->spinlock);
1610 return size;
1611}
1612static DEVICE_ATTR_RW(cntr_idx);
1613
1614static ssize_t cntrldvr_show(struct device *dev,
1615 struct device_attribute *attr,
1616 char *buf)
1617{
1618 u8 idx;
1619 unsigned long val;
1620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1621
1622 spin_lock(&drvdata->spinlock);
1623 idx = drvdata->cntr_idx;
1624 val = drvdata->cntrldvr[idx];
1625 spin_unlock(&drvdata->spinlock);
1626 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1627}
1628
1629static ssize_t cntrldvr_store(struct device *dev,
1630 struct device_attribute *attr,
1631 const char *buf, size_t size)
1632{
1633 u8 idx;
1634 unsigned long val;
1635 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1636
1637 if (kstrtoul(buf, 16, &val))
1638 return -EINVAL;
1639 if (val > ETM_CNTR_MAX_VAL)
1640 return -EINVAL;
1641
1642 spin_lock(&drvdata->spinlock);
1643 idx = drvdata->cntr_idx;
1644 drvdata->cntrldvr[idx] = val;
1645 spin_unlock(&drvdata->spinlock);
1646 return size;
1647}
1648static DEVICE_ATTR_RW(cntrldvr);
1649
1650static ssize_t cntr_val_show(struct device *dev,
1651 struct device_attribute *attr,
1652 char *buf)
1653{
1654 u8 idx;
1655 unsigned long val;
1656 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1657
1658 spin_lock(&drvdata->spinlock);
1659 idx = drvdata->cntr_idx;
1660 val = drvdata->cntr_val[idx];
1661 spin_unlock(&drvdata->spinlock);
1662 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1663}
1664
1665static ssize_t cntr_val_store(struct device *dev,
1666 struct device_attribute *attr,
1667 const char *buf, size_t size)
1668{
1669 u8 idx;
1670 unsigned long val;
1671 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1672
1673 if (kstrtoul(buf, 16, &val))
1674 return -EINVAL;
1675 if (val > ETM_CNTR_MAX_VAL)
1676 return -EINVAL;
1677
1678 spin_lock(&drvdata->spinlock);
1679 idx = drvdata->cntr_idx;
1680 drvdata->cntr_val[idx] = val;
1681 spin_unlock(&drvdata->spinlock);
1682 return size;
1683}
1684static DEVICE_ATTR_RW(cntr_val);
1685
1686static ssize_t cntr_ctrl_show(struct device *dev,
1687 struct device_attribute *attr,
1688 char *buf)
1689{
1690 u8 idx;
1691 unsigned long val;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693
1694 spin_lock(&drvdata->spinlock);
1695 idx = drvdata->cntr_idx;
1696 val = drvdata->cntr_ctrl[idx];
1697 spin_unlock(&drvdata->spinlock);
1698 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1699}
1700
1701static ssize_t cntr_ctrl_store(struct device *dev,
1702 struct device_attribute *attr,
1703 const char *buf, size_t size)
1704{
1705 u8 idx;
1706 unsigned long val;
1707 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1708
1709 if (kstrtoul(buf, 16, &val))
1710 return -EINVAL;
1711
1712 spin_lock(&drvdata->spinlock);
1713 idx = drvdata->cntr_idx;
1714 drvdata->cntr_ctrl[idx] = val;
1715 spin_unlock(&drvdata->spinlock);
1716 return size;
1717}
1718static DEVICE_ATTR_RW(cntr_ctrl);
1719
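/*
 * Resource selectors are programmed through the res_idx/res_ctrl pair.
 * Index values 0 and 1 are rejected because selector pair 0 is always
 * implemented and reserved.
 */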
1720static ssize_t res_idx_show(struct device *dev,
1721 struct device_attribute *attr,
1722 char *buf)
1723{
1724 unsigned long val;
1725 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1726
1727 val = drvdata->res_idx;
1728 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1729}
1730
1731static ssize_t res_idx_store(struct device *dev,
1732 struct device_attribute *attr,
1733 const char *buf, size_t size)
1734{
1735 unsigned long val;
1736 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1737
1738 if (kstrtoul(buf, 16, &val))
1739 return -EINVAL;
1740 /* Resource selector pair 0 is always implemented and reserved */
 1741 if (val < 2 || val >= drvdata->nr_resource * 2)
1742 return -EINVAL;
1743
1744 /*
1745 * Use spinlock to ensure index doesn't change while it gets
1746 * dereferenced multiple times within a spinlock block elsewhere.
1747 */
1748 spin_lock(&drvdata->spinlock);
1749 drvdata->res_idx = val;
1750 spin_unlock(&drvdata->spinlock);
1751 return size;
1752}
1753static DEVICE_ATTR_RW(res_idx);
1754
1755static ssize_t res_ctrl_show(struct device *dev,
1756 struct device_attribute *attr,
1757 char *buf)
1758{
1759 u8 idx;
1760 unsigned long val;
1761 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1762
1763 spin_lock(&drvdata->spinlock);
1764 idx = drvdata->res_idx;
1765 val = drvdata->res_ctrl[idx];
1766 spin_unlock(&drvdata->spinlock);
1767 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1768}
1769
1770static ssize_t res_ctrl_store(struct device *dev,
1771 struct device_attribute *attr,
1772 const char *buf, size_t size)
1773{
1774 u8 idx;
1775 unsigned long val;
1776 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1777
1778 if (kstrtoul(buf, 16, &val))
1779 return -EINVAL;
1780
1781 spin_lock(&drvdata->spinlock);
1782 idx = drvdata->res_idx;
 1783 /* For an odd idx, the pair inversion bit is RES0 */
1784 if (idx % 2 != 0)
1785 /* PAIRINV, bit[21] */
1786 val &= ~BIT(21);
1787 drvdata->res_ctrl[idx] = val;
1788 spin_unlock(&drvdata->spinlock);
1789 return size;
1790}
1791static DEVICE_ATTR_RW(res_ctrl);
1792
1793static ssize_t ctxid_idx_show(struct device *dev,
1794 struct device_attribute *attr,
1795 char *buf)
1796{
1797 unsigned long val;
1798 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1799
1800 val = drvdata->ctxid_idx;
1801 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1802}
1803
1804static ssize_t ctxid_idx_store(struct device *dev,
1805 struct device_attribute *attr,
1806 const char *buf, size_t size)
1807{
1808 unsigned long val;
1809 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1810
1811 if (kstrtoul(buf, 16, &val))
1812 return -EINVAL;
1813 if (val >= drvdata->numcidc)
1814 return -EINVAL;
1815
1816 /*
1817 * Use spinlock to ensure index doesn't change while it gets
1818 * dereferenced multiple times within a spinlock block elsewhere.
1819 */
1820 spin_lock(&drvdata->spinlock);
1821 drvdata->ctxid_idx = val;
1822 spin_unlock(&drvdata->spinlock);
1823 return size;
1824}
1825static DEVICE_ATTR_RW(ctxid_idx);
1826
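/*
 * ctxid_pid takes the PID as seen in the current namespace (vpid),
 * converts it with coresight_vpid_to_pid() and keeps both values so
 * the comparator is programmed with the kernel view of the PID.
 */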
 1827static ssize_t ctxid_pid_show(struct device *dev,
1828 struct device_attribute *attr,
1829 char *buf)
1830{
1831 u8 idx;
1832 unsigned long val;
1833 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1834
1835 spin_lock(&drvdata->spinlock);
1836 idx = drvdata->ctxid_idx;
 1837 val = (unsigned long)drvdata->ctxid_vpid[idx];
1838 spin_unlock(&drvdata->spinlock);
1839 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1840}
1841
 1842static ssize_t ctxid_pid_store(struct device *dev,
1843 struct device_attribute *attr,
1844 const char *buf, size_t size)
1845{
1846 u8 idx;
 1847 unsigned long vpid, pid;
1848 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1849
1850 /*
1851 * only implemented when ctxid tracing is enabled, i.e. at least one
1852 * ctxid comparator is implemented and ctxid is greater than 0 bits
1853 * in length
1854 */
1855 if (!drvdata->ctxid_size || !drvdata->numcidc)
1856 return -EINVAL;
 1857 if (kstrtoul(buf, 16, &vpid))
1858 return -EINVAL;
1859
1860 pid = coresight_vpid_to_pid(vpid);
1861
1862 spin_lock(&drvdata->spinlock);
1863 idx = drvdata->ctxid_idx;
1864 drvdata->ctxid_pid[idx] = (u64)pid;
1865 drvdata->ctxid_vpid[idx] = (u64)vpid;
1866 spin_unlock(&drvdata->spinlock);
1867 return size;
1868}
 1869static DEVICE_ATTR_RW(ctxid_pid);
1870
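/*
 * ctxid_masks: each byte of ctxid_mask0/ctxid_mask1 is the mask for
 * one context ID comparator (comparators 0-3 in mask0, 4-7 in mask1).
 * Any comparator byte that gets masked is also cleared in the stored
 * comparator value, as the architecture requires.
 */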
1871static ssize_t ctxid_masks_show(struct device *dev,
1872 struct device_attribute *attr,
1873 char *buf)
1874{
1875 unsigned long val1, val2;
1876 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1877
1878 spin_lock(&drvdata->spinlock);
1879 val1 = drvdata->ctxid_mask0;
1880 val2 = drvdata->ctxid_mask1;
1881 spin_unlock(&drvdata->spinlock);
1882 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1883}
1884
1885static ssize_t ctxid_masks_store(struct device *dev,
1886 struct device_attribute *attr,
1887 const char *buf, size_t size)
1888{
1889 u8 i, j, maskbyte;
1890 unsigned long val1, val2, mask;
1891 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1892
1893 /*
1894 * only implemented when ctxid tracing is enabled, i.e. at least one
1895 * ctxid comparator is implemented and ctxid is greater than 0 bits
1896 * in length
1897 */
1898 if (!drvdata->ctxid_size || !drvdata->numcidc)
1899 return -EINVAL;
1900 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1901 return -EINVAL;
1902
1903 spin_lock(&drvdata->spinlock);
1904 /*
1905 * each byte[0..3] controls mask value applied to ctxid
1906 * comparator[0..3]
1907 */
1908 switch (drvdata->numcidc) {
1909 case 0x1:
1910 /* COMP0, bits[7:0] */
1911 drvdata->ctxid_mask0 = val1 & 0xFF;
1912 break;
1913 case 0x2:
1914 /* COMP1, bits[15:8] */
1915 drvdata->ctxid_mask0 = val1 & 0xFFFF;
1916 break;
1917 case 0x3:
1918 /* COMP2, bits[23:16] */
1919 drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1920 break;
1921 case 0x4:
1922 /* COMP3, bits[31:24] */
1923 drvdata->ctxid_mask0 = val1;
1924 break;
1925 case 0x5:
1926 /* COMP4, bits[7:0] */
1927 drvdata->ctxid_mask0 = val1;
1928 drvdata->ctxid_mask1 = val2 & 0xFF;
1929 break;
1930 case 0x6:
1931 /* COMP5, bits[15:8] */
1932 drvdata->ctxid_mask0 = val1;
1933 drvdata->ctxid_mask1 = val2 & 0xFFFF;
1934 break;
1935 case 0x7:
1936 /* COMP6, bits[23:16] */
1937 drvdata->ctxid_mask0 = val1;
1938 drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1939 break;
1940 case 0x8:
1941 /* COMP7, bits[31:24] */
1942 drvdata->ctxid_mask0 = val1;
1943 drvdata->ctxid_mask1 = val2;
1944 break;
1945 default:
1946 break;
1947 }
1948 /*
1949 * If software sets a mask bit to 1, it must program the relevant byte
1950 * of the ctxid comparator value to 0x0, otherwise the behavior is
1951 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
1952 * clear bits[31:24], i.e. byte 3, of the ctxid comparator 0 value register.
1953 */
1954 mask = drvdata->ctxid_mask0;
1955 for (i = 0; i < drvdata->numcidc; i++) {
1956 /* mask value of corresponding ctxid comparator */
1957 maskbyte = mask & ETMv4_EVENT_MASK;
1958 /*
1959 * each bit corresponds to a byte of respective ctxid comparator
1960 * value register
1961 */
1962 for (j = 0; j < 8; j++) {
1963 if (maskbyte & 1)
cd196ac3 1964 drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
1965 maskbyte >>= 1;
1966 }
1967 /* Select the next ctxid comparator mask value */
1968 if (i == 3)
1969 /* ctxid comparators[4-7] */
1970 mask = drvdata->ctxid_mask1;
1971 else
1972 mask >>= 0x8;
1973 }
1974
1975 spin_unlock(&drvdata->spinlock);
1976 return size;
1977}
1978static DEVICE_ATTR_RW(ctxid_masks);
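/*
 * A minimal usage sketch for the three ctxid attributes above, driven from
 * user space over sysfs. The device directory name ("20040000.etm") and the
 * example PID/mask values are assumptions for illustration only; the
 * attribute names match the DEVICE_ATTR_RW() definitions in this file:
 *
 *   cd /sys/bus/coresight/devices/20040000.etm
 *   echo 0 > ctxid_idx          # select context ID comparator 0
 *   echo 0xc8 > ctxid_pid       # (v)PID to match on that comparator
 *   echo 0x0 0x0 > ctxid_masks  # leave every byte of the value unmasked
 */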
1979
1980static ssize_t vmid_idx_show(struct device *dev,
1981 struct device_attribute *attr,
1982 char *buf)
1983{
1984 unsigned long val;
1985 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1986
1987 val = drvdata->vmid_idx;
1988 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1989}
1990
1991static ssize_t vmid_idx_store(struct device *dev,
1992 struct device_attribute *attr,
1993 const char *buf, size_t size)
1994{
1995 unsigned long val;
1996 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1997
1998 if (kstrtoul(buf, 16, &val))
1999 return -EINVAL;
2000 if (val >= drvdata->numvmidc)
2001 return -EINVAL;
2002
2003 /*
2004 * Use spinlock to ensure index doesn't change while it gets
2005 * dereferenced multiple times within a spinlock block elsewhere.
2006 */
2007 spin_lock(&drvdata->spinlock);
2008 drvdata->vmid_idx = val;
2009 spin_unlock(&drvdata->spinlock);
2010 return size;
2011}
2012static DEVICE_ATTR_RW(vmid_idx);
2013
2014static ssize_t vmid_val_show(struct device *dev,
2015 struct device_attribute *attr,
2016 char *buf)
2017{
2018 unsigned long val;
2019 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2020
2021 val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
2022 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2023}
2024
2025static ssize_t vmid_val_store(struct device *dev,
2026 struct device_attribute *attr,
2027 const char *buf, size_t size)
2028{
2029 unsigned long val;
2030 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2031
2032 /*
2033 * only implemented when vmid tracing is enabled, i.e. at least one
2034 * vmid comparator is implemented and the VMID size is at least 8 bits
2035 */
2036 if (!drvdata->vmid_size || !drvdata->numvmidc)
2037 return -EINVAL;
2038 if (kstrtoul(buf, 16, &val))
2039 return -EINVAL;
2040
2041 spin_lock(&drvdata->spinlock);
2042 drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2043 spin_unlock(&drvdata->spinlock);
2044 return size;
2045}
2046static DEVICE_ATTR_RW(vmid_val);
2047
2048static ssize_t vmid_masks_show(struct device *dev,
2049 struct device_attribute *attr, char *buf)
2050{
2051 unsigned long val1, val2;
2052 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2053
2054 spin_lock(&drvdata->spinlock);
2055 val1 = drvdata->vmid_mask0;
2056 val2 = drvdata->vmid_mask1;
2057 spin_unlock(&drvdata->spinlock);
2058 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2059}
2060
2061static ssize_t vmid_masks_store(struct device *dev,
2062 struct device_attribute *attr,
2063 const char *buf, size_t size)
2064{
2065 u8 i, j, maskbyte;
2066 unsigned long val1, val2, mask;
2067 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2068 /*
2069 * only implemented when vmid tracing is enabled, i.e. at least one
2070 * vmid comparator is implemented and the VMID size is at least 8 bits
2071 */
2072 if (!drvdata->vmid_size || !drvdata->numvmidc)
2073 return -EINVAL;
2074 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2075 return -EINVAL;
2076
2077 spin_lock(&drvdata->spinlock);
2078
2079 /*
2080 * each byte[0..3] controls mask value applied to vmid
2081 * comparator[0..3]
2082 */
2083 switch (drvdata->numvmidc) {
2084 case 0x1:
2085 /* COMP0, bits[7:0] */
2086 drvdata->vmid_mask0 = val1 & 0xFF;
2087 break;
2088 case 0x2:
2089 /* COMP1, bits[15:8] */
2090 drvdata->vmid_mask0 = val1 & 0xFFFF;
2091 break;
2092 case 0x3:
2093 /* COMP2, bits[23:16] */
2094 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2095 break;
2096 case 0x4:
2097 /* COMP3, bits[31:24] */
2098 drvdata->vmid_mask0 = val1;
2099 break;
2100 case 0x5:
2101 /* COMP4, bits[7:0] */
2102 drvdata->vmid_mask0 = val1;
2103 drvdata->vmid_mask1 = val2 & 0xFF;
2104 break;
2105 case 0x6:
2106 /* COMP5, bits[15:8] */
2107 drvdata->vmid_mask0 = val1;
2108 drvdata->vmid_mask1 = val2 & 0xFFFF;
2109 break;
2110 case 0x7:
2111 /* COMP6, bits[23:16] */
2112 drvdata->vmid_mask0 = val1;
2113 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2114 break;
2115 case 0x8:
2116 /* COMP7, bits[31:24] */
2117 drvdata->vmid_mask0 = val1;
2118 drvdata->vmid_mask1 = val2;
2119 break;
2120 default:
2121 break;
2122 }
2123
2124 /*
2125 * If software sets a mask bit to 1, it must program the relevant byte
2126 * of the vmid comparator value to 0x0, otherwise the behavior is
2127 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
2128 * clear bits[31:24], i.e. byte 3, of the vmid comparator 0 value register.
2129 */
2130 mask = drvdata->vmid_mask0;
2131 for (i = 0; i < drvdata->numvmidc; i++) {
2132 /* mask value of corresponding vmid comparator */
2133 maskbyte = mask & ETMv4_EVENT_MASK;
2134 /*
2135 * each bit corresponds to a byte of respective vmid comparator
2136 * value register
2137 */
2138 for (j = 0; j < 8; j++) {
2139 if (maskbyte & 1)
2140 drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
2141 maskbyte >>= 1;
2142 }
2143 /* Select the next vmid comparator mask value */
2144 if (i == 3)
2145 /* vmid comparators[4-7] */
2146 mask = drvdata->vmid_mask1;
2147 else
2148 mask >>= 0x8;
2149 }
2150 spin_unlock(&drvdata->spinlock);
2151 return size;
2152}
2153static DEVICE_ATTR_RW(vmid_masks);
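/*
 * The vmid_idx/vmid_val/vmid_masks attributes above mirror the ctxid set:
 * select a comparator, program its value, then adjust its byte masks. A
 * hypothetical sequence, using the same assumed sysfs directory as in the
 * ctxid example earlier in this file:
 *
 *   echo 0 > vmid_idx
 *   echo 0x1 > vmid_val
 *   echo 0x0 0x0 > vmid_masks
 */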
2154
2155static ssize_t cpu_show(struct device *dev,
2156 struct device_attribute *attr, char *buf)
2157{
2158 int val;
2159 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2160
2161 val = drvdata->cpu;
2162 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2163
2164}
2165static DEVICE_ATTR_RO(cpu);
2166
2167static struct attribute *coresight_etmv4_attrs[] = {
2168 &dev_attr_nr_pe_cmp.attr,
2169 &dev_attr_nr_addr_cmp.attr,
2170 &dev_attr_nr_cntr.attr,
2171 &dev_attr_nr_ext_inp.attr,
2172 &dev_attr_numcidc.attr,
2173 &dev_attr_numvmidc.attr,
2174 &dev_attr_nrseqstate.attr,
2175 &dev_attr_nr_resource.attr,
2176 &dev_attr_nr_ss_cmp.attr,
2177 &dev_attr_reset.attr,
2178 &dev_attr_mode.attr,
2179 &dev_attr_pe.attr,
2180 &dev_attr_event.attr,
2181 &dev_attr_event_instren.attr,
2182 &dev_attr_event_ts.attr,
2183 &dev_attr_syncfreq.attr,
2184 &dev_attr_cyc_threshold.attr,
2185 &dev_attr_bb_ctrl.attr,
2186 &dev_attr_event_vinst.attr,
2187 &dev_attr_s_exlevel_vinst.attr,
2188 &dev_attr_ns_exlevel_vinst.attr,
2189 &dev_attr_addr_idx.attr,
2190 &dev_attr_addr_instdatatype.attr,
2191 &dev_attr_addr_single.attr,
2192 &dev_attr_addr_range.attr,
2193 &dev_attr_addr_start.attr,
2194 &dev_attr_addr_stop.attr,
2195 &dev_attr_addr_ctxtype.attr,
2196 &dev_attr_addr_context.attr,
2197 &dev_attr_seq_idx.attr,
2198 &dev_attr_seq_state.attr,
2199 &dev_attr_seq_event.attr,
2200 &dev_attr_seq_reset_event.attr,
2201 &dev_attr_cntr_idx.attr,
2202 &dev_attr_cntrldvr.attr,
2203 &dev_attr_cntr_val.attr,
2204 &dev_attr_cntr_ctrl.attr,
2205 &dev_attr_res_idx.attr,
2206 &dev_attr_res_ctrl.attr,
4a584be1 2207 &dev_attr_ctxid_idx.attr,
cd196ac3 2208 &dev_attr_ctxid_pid.attr,
4a584be1 2209 &dev_attr_ctxid_masks.attr,
2210 &dev_attr_vmid_idx.attr,
2211 &dev_attr_vmid_val.attr,
2212 &dev_attr_vmid_masks.attr,
2213 &dev_attr_cpu.attr,
2214 NULL,
2215};
2216
2217#define coresight_simple_func(name, offset) \
2218static ssize_t name##_show(struct device *_dev, \
2219 struct device_attribute *attr, char *buf) \
2220{ \
2221 struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
2222 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
2223 readl_relaxed(drvdata->base + offset)); \
2224} \
bf16e5b8 2225static DEVICE_ATTR_RO(name)
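/*
 * For reference, a hand expansion of one invocation below, e.g.
 * coresight_simple_func(trcoslsr, TRCOSLSR), is roughly:
 *
 *   static ssize_t trcoslsr_show(struct device *_dev,
 *                                struct device_attribute *attr, char *buf)
 *   {
 *           struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);
 *           return scnprintf(buf, PAGE_SIZE, "0x%x\n",
 *                            readl_relaxed(drvdata->base + TRCOSLSR));
 *   }
 *   static DEVICE_ATTR_RO(trcoslsr);
 *
 * i.e. each call creates a read-only sysfs attribute that dumps the raw
 * value of the named management register.
 */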
2226
2227coresight_simple_func(trcoslsr, TRCOSLSR);
2228coresight_simple_func(trcpdcr, TRCPDCR);
2229coresight_simple_func(trcpdsr, TRCPDSR);
2230coresight_simple_func(trclsr, TRCLSR);
2231coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2232coresight_simple_func(trcdevid, TRCDEVID);
2233coresight_simple_func(trcdevtype, TRCDEVTYPE);
2234coresight_simple_func(trcpidr0, TRCPIDR0);
2235coresight_simple_func(trcpidr1, TRCPIDR1);
2236coresight_simple_func(trcpidr2, TRCPIDR2);
2237coresight_simple_func(trcpidr3, TRCPIDR3);
2238
2239static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2240 &dev_attr_trcoslsr.attr,
2241 &dev_attr_trcpdcr.attr,
2242 &dev_attr_trcpdsr.attr,
2243 &dev_attr_trclsr.attr,
2244 &dev_attr_trcauthstatus.attr,
2245 &dev_attr_trcdevid.attr,
2246 &dev_attr_trcdevtype.attr,
2247 &dev_attr_trcpidr0.attr,
2248 &dev_attr_trcpidr1.attr,
2249 &dev_attr_trcpidr2.attr,
2250 &dev_attr_trcpidr3.attr,
2251 NULL,
2252};
2253
2254coresight_simple_func(trcidr0, TRCIDR0);
2255coresight_simple_func(trcidr1, TRCIDR1);
2256coresight_simple_func(trcidr2, TRCIDR2);
2257coresight_simple_func(trcidr3, TRCIDR3);
2258coresight_simple_func(trcidr4, TRCIDR4);
2259coresight_simple_func(trcidr5, TRCIDR5);
2260/* trcidr[6,7] are reserved */
2261coresight_simple_func(trcidr8, TRCIDR8);
2262coresight_simple_func(trcidr9, TRCIDR9);
2263coresight_simple_func(trcidr10, TRCIDR10);
2264coresight_simple_func(trcidr11, TRCIDR11);
2265coresight_simple_func(trcidr12, TRCIDR12);
2266coresight_simple_func(trcidr13, TRCIDR13);
2267
2268static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2269 &dev_attr_trcidr0.attr,
2270 &dev_attr_trcidr1.attr,
2271 &dev_attr_trcidr2.attr,
2272 &dev_attr_trcidr3.attr,
2273 &dev_attr_trcidr4.attr,
2274 &dev_attr_trcidr5.attr,
2275 /* trcidr[6,7] are reserved */
2276 &dev_attr_trcidr8.attr,
2277 &dev_attr_trcidr9.attr,
2278 &dev_attr_trcidr10.attr,
2279 &dev_attr_trcidr11.attr,
2280 &dev_attr_trcidr12.attr,
2281 &dev_attr_trcidr13.attr,
2282 NULL,
2283};
2284
2285static const struct attribute_group coresight_etmv4_group = {
2286 .attrs = coresight_etmv4_attrs,
2287};
2288
2289static const struct attribute_group coresight_etmv4_mgmt_group = {
2290 .attrs = coresight_etmv4_mgmt_attrs,
2291 .name = "mgmt",
2292};
2293
2294static const struct attribute_group coresight_etmv4_trcidr_group = {
2295 .attrs = coresight_etmv4_trcidr_attrs,
2296 .name = "trcidr",
2297};
2298
2299static const struct attribute_group *coresight_etmv4_groups[] = {
2300 &coresight_etmv4_group,
2301 &coresight_etmv4_mgmt_group,
5625988e 2302 &coresight_etmv4_trcidr_group,
2303 NULL,
2304};
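/*
 * Sketch of the resulting sysfs layout for one ETM instance; the directory
 * name is board specific and assumed here for illustration:
 *
 *   /sys/bus/coresight/devices/<addr>.etm/
 *   |-- addr_idx, ctxid_idx, vmid_idx, ... (coresight_etmv4_attrs)
 *   |-- mgmt/    trcoslsr, trcpdcr, ...    (coresight_etmv4_mgmt_attrs)
 *   `-- trcidr/  trcidr0..trcidr13         (coresight_etmv4_trcidr_attrs)
 */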
2305
2306static void etm4_init_arch_data(void *info)
2307{
2308 u32 etmidr0;
2309 u32 etmidr1;
2310 u32 etmidr2;
2311 u32 etmidr3;
2312 u32 etmidr4;
2313 u32 etmidr5;
2314 struct etmv4_drvdata *drvdata = info;
2315
2316 CS_UNLOCK(drvdata->base);
2317
2318 /* find all capabilities of the tracing unit */
2319 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
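	/*
	 * Field extraction below relies on the BMVAL(val, lsb, msb) helper,
	 * which (assuming the usual coresight-priv.h definition) returns
	 * bits [msb:lsb] of val shifted down to bit 0, e.g.
	 * BMVAL(0x6, 1, 2) == 0x3.
	 */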
2320
2321 /* INSTP0, bits[2:1] P0 tracing support field */
2322 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
2323 drvdata->instrp0 = true;
2324 else
2325 drvdata->instrp0 = false;
2326
2327 /* TRCBB, bit[5] Branch broadcast tracing support bit */
2328 if (BMVAL(etmidr0, 5, 5))
2329 drvdata->trcbb = true;
2330 else
2331 drvdata->trcbb = false;
2332
2333 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
2334 if (BMVAL(etmidr0, 6, 6))
2335 drvdata->trccond = true;
2336 else
2337 drvdata->trccond = false;
2338
2339 /* TRCCCI, bit[7] Cycle counting instruction bit */
2340 if (BMVAL(etmidr0, 7, 7))
2341 drvdata->trccci = true;
2342 else
2343 drvdata->trccci = false;
2344
2345 /* RETSTACK, bit[9] Return stack bit */
2346 if (BMVAL(etmidr0, 9, 9))
2347 drvdata->retstack = true;
2348 else
2349 drvdata->retstack = false;
2350
2351 /* NUMEVENT, bits[11:10] Number of events field */
2352 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
2353 /* QSUPP, bits[16:15] Q element support field */
2354 drvdata->q_support = BMVAL(etmidr0, 15, 16);
2355 /* TSSIZE, bits[28:24] Global timestamp size field */
2356 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
2357
2358 /* base architecture of trace unit */
2359 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
2360 /*
2361 * TRCARCHMIN, bits[7:4] architecture minor version number
2362 * TRCARCHMAJ, bits[11:8] architecture major version number
2363 */
2364 drvdata->arch = BMVAL(etmidr1, 4, 11);
2365
2366 /* maximum size of resources */
2367 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
2368 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
2369 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
2370 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
2371 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
2372 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
2373 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
2374
2375 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
2376 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
2377 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
2378 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
2379 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
2380 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
2381 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
2382
2383 /*
2384 * TRCERR, bit[24] whether a trace unit can trace a
2385 * system error exception.
2386 */
2387 if (BMVAL(etmidr3, 24, 24))
2388 drvdata->trc_error = true;
2389 else
2390 drvdata->trc_error = false;
2391
2392 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
2393 if (BMVAL(etmidr3, 25, 25))
2394 drvdata->syncpr = true;
2395 else
2396 drvdata->syncpr = false;
2397
2398 /* STALLCTL, bit[26] is stall control implemented? */
2399 if (BMVAL(etmidr3, 26, 26))
2400 drvdata->stallctl = true;
2401 else
2402 drvdata->stallctl = false;
2403
2404 /* SYSSTALL, bit[27] implementation can support stall control? */
2405 if (BMVAL(etmidr3, 27, 27))
2406 drvdata->sysstall = true;
2407 else
2408 drvdata->sysstall = false;
2409
2410 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
2411 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
2412
2413 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
2414 if (BMVAL(etmidr3, 31, 31))
2415 drvdata->nooverflow = true;
2416 else
2417 drvdata->nooverflow = false;
2418
2419 /* number of resources trace unit supports */
2420 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
2421 /* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
2422 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
2423 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
2424 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
2425 /*
2426 * NUMRSPAIR, bits[19:16]
2427 * The number of resource pairs conveyed by the HW starts at 0, i.e. a
2428 * value of 0x0 indicates 1 resource pair, 0x1 indicates two and so on.
2429 * As such, add 1 to the value of NUMRSPAIR for a better representation.
2430 */
2431 drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
2432 /*
2433 * NUMSSCC, bits[23:20] the number of single-shot
2434 * comparator control for tracing
2435 */
2436 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
2437 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
2438 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
2439 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
2440 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
2441
2442 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
2443 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
2444 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
2445 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
2446 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
2447 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
2448 if (BMVAL(etmidr5, 22, 22))
2449 drvdata->atbtrig = true;
2450 else
2451 drvdata->atbtrig = false;
2452 /*
2453 * LPOVERRIDE, bit[23] implementation supports
2454 * low-power state override
2455 */
2456 if (BMVAL(etmidr5, 23, 23))
2457 drvdata->lpoverride = true;
2458 else
2459 drvdata->lpoverride = false;
2460 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
2461 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
2462 /* NUMCNTR, bits[30:28] number of counters available for tracing */
2463 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
2464 CS_LOCK(drvdata->base);
2465}
2466
2467static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
2468{
2469 int i;
2470
2471 drvdata->pe_sel = 0x0;
2472 drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
2473 ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
2474
2475 /* disable all events tracing */
2476 drvdata->eventctrl0 = 0x0;
2477 drvdata->eventctrl1 = 0x0;
2478
2479 /* disable stalling */
2480 drvdata->stall_ctrl = 0x0;
2481
2482 /* disable timestamp event */
2483 drvdata->ts_ctrl = 0x0;
2484
2485 /* enable trace synchronization every 4096 bytes of trace */
2486 if (!drvdata->syncpr)
2487 drvdata->syncfreq = 0xC;
2488
2489 /*
2490 * enable viewInst to trace everything with start-stop logic in
2491 * started state
2492 */
2493 drvdata->vinst_ctrl |= BIT(0);
2494 /* set initial state of start-stop logic */
2495 if (drvdata->nr_addr_cmp)
2496 drvdata->vinst_ctrl |= BIT(9);
2497
2498 /* no address range filtering for ViewInst */
2499 drvdata->viiectlr = 0x0;
2500 /* no start-stop filtering for ViewInst */
2501 drvdata->vissctlr = 0x0;
2502
2503 /* disable seq events */
2504 for (i = 0; i < drvdata->nrseqstate-1; i++)
2505 drvdata->seq_ctrl[i] = 0x0;
2506 drvdata->seq_rst = 0x0;
2507 drvdata->seq_state = 0x0;
2508
2509 /* disable external input events */
2510 drvdata->ext_inp = 0x0;
2511
2512 for (i = 0; i < drvdata->nr_cntr; i++) {
2513 drvdata->cntrldvr[i] = 0x0;
2514 drvdata->cntr_ctrl[i] = 0x0;
2515 drvdata->cntr_val[i] = 0x0;
2516 }
2517
2518 /* Resource selector pair 0 is always implemented and reserved */
2519 drvdata->res_idx = 0x2;
2520 for (i = 2; i < drvdata->nr_resource * 2; i++)
2521 drvdata->res_ctrl[i] = 0x0;
2522
2523 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
2524 drvdata->ss_ctrl[i] = 0x0;
2525 drvdata->ss_pe_cmp[i] = 0x0;
2526 }
2527
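	/*
	 * Pre-load address comparator pair 0 with the kernel text boundaries
	 * (_stext/_etext) as a convenient default range. Note that ViewInst
	 * include/exclude filtering itself is left disabled above
	 * (viiectlr = 0x0) until configured through sysfs.
	 */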
2528 if (drvdata->nr_addr_cmp >= 1) {
2529 drvdata->addr_val[0] = (unsigned long)_stext;
2530 drvdata->addr_val[1] = (unsigned long)_etext;
2531 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2532 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2533 }
2534
f67b467a 2535 for (i = 0; i < drvdata->numcidc; i++) {
cd196ac3 2536 drvdata->ctxid_pid[i] = 0x0;
2537 drvdata->ctxid_vpid[i] = 0x0;
2538 }
2539
2540 drvdata->ctxid_mask0 = 0x0;
2541 drvdata->ctxid_mask1 = 0x0;
2542
2543 for (i = 0; i < drvdata->numvmidc; i++)
2544 drvdata->vmid_val[i] = 0x0;
2545 drvdata->vmid_mask0 = 0x0;
2546 drvdata->vmid_mask1 = 0x0;
2547
2548 /*
2549 * A trace ID value of 0 is invalid, so let's start at some
2550 * arbitrary value that fits in 7 bits. ETMv3.x uses 0x10, so
2551 * start at 0x20.
2552 */
2553 drvdata->trcid = 0x20 + drvdata->cpu;
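	/* e.g. CPU0 is assigned trace ID 0x20, CPU1 0x21, and so on */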
2554}
2555
2556static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2557 void *hcpu)
2558{
2559 unsigned int cpu = (unsigned long)hcpu;
2560
2561 if (!etmdrvdata[cpu])
2562 goto out;
2563
2564 switch (action & (~CPU_TASKS_FROZEN)) {
2565 case CPU_STARTING:
2566 spin_lock(&etmdrvdata[cpu]->spinlock);
2567 if (!etmdrvdata[cpu]->os_unlock) {
2568 etm4_os_unlock(etmdrvdata[cpu]);
2569 etmdrvdata[cpu]->os_unlock = true;
2570 }
2571
2572 if (etmdrvdata[cpu]->enable)
2573 etm4_enable_hw(etmdrvdata[cpu]);
2574 spin_unlock(&etmdrvdata[cpu]->spinlock);
2575 break;
2576
2577 case CPU_ONLINE:
2578 if (etmdrvdata[cpu]->boot_enable &&
2579 !etmdrvdata[cpu]->sticky_enable)
2580 coresight_enable(etmdrvdata[cpu]->csdev);
2581 break;
2582
2583 case CPU_DYING:
2584 spin_lock(&etmdrvdata[cpu]->spinlock);
2585 if (etmdrvdata[cpu]->enable)
2586 etm4_disable_hw(etmdrvdata[cpu]);
2587 spin_unlock(&etmdrvdata[cpu]->spinlock);
2588 break;
2589 }
2590out:
2591 return NOTIFY_OK;
2592}
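/*
 * Note on the callback above: CPU_STARTING and CPU_DYING notifications run
 * on the CPU that is coming up or going down, early enough that its trace
 * registers can be (re)programmed directly. That is why etm4_enable_hw()
 * and etm4_disable_hw() are called inline here, rather than through
 * smp_call_function_single() as in the probe path below.
 */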
2593
2594static struct notifier_block etm4_cpu_notifier = {
2595 .notifier_call = etm4_cpu_callback,
2596};
2597
2598static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2599{
2600 int ret;
2601 void __iomem *base;
2602 struct device *dev = &adev->dev;
2603 struct coresight_platform_data *pdata = NULL;
2604 struct etmv4_drvdata *drvdata;
2605 struct resource *res = &adev->res;
2606 struct coresight_desc *desc;
2607 struct device_node *np = adev->dev.of_node;
2608
2609 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
2610 if (!desc)
2611 return -ENOMEM;
2612
2613 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
2614 if (!drvdata)
2615 return -ENOMEM;
2616
2617 if (np) {
2618 pdata = of_get_coresight_platform_data(dev, np);
2619 if (IS_ERR(pdata))
2620 return PTR_ERR(pdata);
2621 adev->dev.platform_data = pdata;
2622 }
2623
2624 drvdata->dev = &adev->dev;
2625 dev_set_drvdata(dev, drvdata);
2626
2627 /* Validity for the resource is already checked by the AMBA core */
2628 base = devm_ioremap_resource(dev, res);
2629 if (IS_ERR(base))
2630 return PTR_ERR(base);
2631
2632 drvdata->base = base;
2633
2634 spin_lock_init(&drvdata->spinlock);
2635
2636 drvdata->cpu = pdata ? pdata->cpu : 0;
2637
2638 get_online_cpus();
2639 etmdrvdata[drvdata->cpu] = drvdata;
2640
2641 if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2642 drvdata->os_unlock = true;
2643
2644 if (smp_call_function_single(drvdata->cpu,
2645 etm4_init_arch_data, drvdata, 1))
2646 dev_err(dev, "ETM arch init failed\n");
2647
2648 if (!etm4_count++)
2649 register_hotcpu_notifier(&etm4_cpu_notifier);
2650
2651 put_online_cpus();
2652
2653 if (!etm4_arch_supported(drvdata->arch)) {
2654 ret = -EINVAL;
2655 goto err_arch_supported;
2656 }
2657 etm4_init_default_data(drvdata);
2658
2659 pm_runtime_put(&adev->dev);
2660
2661 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2662 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
2663 desc->ops = &etm4_cs_ops;
2664 desc->pdata = pdata;
2665 desc->dev = dev;
2666 desc->groups = coresight_etmv4_groups;
2667 drvdata->csdev = coresight_register(desc);
2668 if (IS_ERR(drvdata->csdev)) {
2669 ret = PTR_ERR(drvdata->csdev);
2670 goto err_coresight_register;
2671 }
2672
2673 dev_info(dev, "%s initialized\n", (char *)id->data);
2674
2675 if (boot_enable) {
2676 coresight_enable(drvdata->csdev);
2677 drvdata->boot_enable = true;
2678 }
2679
2680 return 0;
2681
2682err_arch_supported:
2683 pm_runtime_put(&adev->dev);
2684err_coresight_register:
2685 if (--etm4_count == 0)
2686 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2687 return ret;
2688}
2689
2690static struct amba_id etm4_ids[] = {
2691 { /* ETM 4.0 - Qualcomm */
2692 .id = 0x0003b95d,
2693 .mask = 0x0003ffff,
2694 .data = "ETM 4.0",
2695 },
2696 { /* ETM 4.0 - Juno board */
2697 .id = 0x000bb95e,
2698 .mask = 0x000fffff,
2699 .data = "ETM 4.0",
2700 },
2701 { 0, 0},
2702};
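/*
 * Matching sketch: the AMBA bus layer reads the component's peripheral ID
 * registers and compares the result against each entry, roughly as
 * (periphid & entry->mask) == entry->id, so the two entries above catch the
 * Qualcomm and Juno ETMv4 implementations respectively.
 */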
2703
2704static struct amba_driver etm4x_driver = {
2705 .drv = {
2706 .name = "coresight-etm4x",
b15f0fb6 2707 .suppress_bind_attrs = true,
2708 },
2709 .probe = etm4_probe,
2710 .id_table = etm4_ids,
2711};
941943cf 2712builtin_amba_driver(etm4x_driver);