/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005  Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * 	http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
 * perfmon context state
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7 : register type
 */
#define PFM_REG_NOTIMPL		0x0 /* not implemented at all */
#define PFM_REG_IMPL		0x1 /* register implemented */
#define PFM_REG_END		0x2 /* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL) /* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask)    (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)    (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
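
/*
 * Illustrative sketch (not part of the original code): how the register
 * type bitmask macros above compose.  A counting PMC carries the IMPL and
 * MONITOR bits plus the counting bit, so PMC_IS_COUNTING() implies
 * PMC_IS_MONITOR() which implies PMC_IS_IMPL().  Compiled out.
 */
#if 0
static void pfm_illustrate_reg_type(unsigned int i)
{
	if (PMC_IS_COUNTING(i)) {
		/* pmc.oi exists and the matching PMD acts as a 64-bit counter */
	} else if (PMC_IS_MONITOR(i)) {
		/* pmc.pm exists but the register does not drive a counter */
	} else if (!PMC_IS_IMPL(i)) {
		/* register not implemented on this PMU model */
	}
}
#endif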
/*
 * context protection macros
 * in SMP:
 * 	- we need to protect against CPU concurrency (spin_lock)
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP:
 * 	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 * 	in SMP: local_irq_disable + spin_lock
 * 	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 * 	in UP : removed automatically
 * 	in SMP: protect against context accesses from other CPU. interrupts
 * 	        are not masked. This is useful for the PMU interrupt handler
 * 	        because we know we will not get PMU concurrency in that code.
 */
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)
#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
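
/*
 * Illustrative sketch (not from the original source): the activation number
 * is how perfmon decides, on context switch in, whether the PMU on this CPU
 * still holds this context's state.  Each time a context is loaded, the
 * per-CPU pmu_activation_number is incremented and copied into
 * ctx_last_activation; if the two still match later (and the context last
 * ran on this CPU), the PMU registers do not need to be reloaded.
 * Compiled out.
 */
#if 0
static int pfm_ctx_still_live_on_this_cpu(pfm_context_t *ctx)
{
	return GET_LAST_CPU(ctx) == (int)smp_processor_id()
	    && ctx->ctx_last_activation == GET_ACTIVATION();
}
#endif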
/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
/*
 * 64-bit software counter structure
 *
 * the next_reset_type is applied to the next call to pfm_reset_regs()
 */
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflow */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
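
/*
 * Illustrative sketch (not part of the original code): a counting PMD is
 * kept as a full 64-bit software value even though the hardware register
 * only implements pmu_conf->ovfl_val worth of bits.  The upper bits live in
 * pfm_counter_t.val, the low bits in the hardware PMD, so the full value is
 * val + (ia64_get_pmd(i) & ovfl_val), exactly what pfm_read_soft_counter()
 * computes further down.  Compiled out.
 */
#if 0
static unsigned long pfm_example_full_counter(pfm_counter_t *c, int i)
{
	return c->val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}
#endif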
/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will be blocked on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE		0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK		0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET		0x2	/* we need to reset PMDs */
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */

typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
/*
 * magic number used to verify that structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif
#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0);
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
} pfm_session_t;
/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);

typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 * 	- 0 means recognized PMU
 * 	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family such that:
 * 	- cpu->family & config->pmu_family != 0
 */
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int   num_pmcs;	/* number of PMCS: computed at init time */
	unsigned int   num_pmds;	/* number of PMDS: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCS */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDS */

	char	      *pmu_name;	/* PMU family name */
	unsigned int   pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int   flags;		/* pmu specific flags */
	unsigned int   num_ibrs;	/* number of IBRS: computed at init time */
	unsigned int   num_dbrs;	/* number of DBRS: computed at init time */
	unsigned int   num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int            (*probe)(void);	/* customized probe routine */
	unsigned int   use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;

#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */
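
/*
 * Illustrative sketch (not part of the original code): PMU detection walks
 * the pmu_confs[] table defined further down; a model is selected either by
 * its probe() hook returning 0 or, when probe is absent, by matching
 * pmu_family against the local CPU family, as described in the comment
 * above.  Compiled out.
 */
#if 0
static pmu_config_t *pfm_example_detect_pmu(void)
{
	pmu_config_t **p;

	for (p = pmu_confs; *p; p++) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0)
				return *p;
		} else if ((*p)->pmu_family & local_cpu_data->family) {
			return *p;
		}
	}
	return NULL;
}
#endif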
/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;
/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
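
/*
 * Illustrative sketch (not part of the original code): how the command flag
 * accessors above are typically consulted when dispatching a perfmonctl()
 * command.  This is a hypothetical check, not the real sys_perfmonctl()
 * logic; pfm_cmd_tab[] is defined further down in the file.  Compiled out.
 */
#if 0
static int pfm_example_cmd_ok(int cmd, pfm_context_t *ctx)
{
	/* commands flagged PFM_CMD_STOP are refused on a zombie context */
	if (PFM_CMD_STOPPED(cmd) && ctx && ctx->ctx_state == PFM_CTX_ZOMBIE)
		return -EINVAL;
	/* commands flagged PFM_CMD_FD must be given a valid context fd */
	if (PFM_CMD_USE_FD(cmd) && ctx == NULL)
		return -EBADF;
	return 0;
}
#endif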
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
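
/*
 * Usage note (not part of the original code): the tables above surface the
 * knobs as /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,expert_mode}.
 * For example, writing 1 to the debug file turns on the DPRINT() traces
 * defined earlier:
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 */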
#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}
static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
/* forward declaration */
static const struct dentry_operations pfmfs_dentry_operations;

static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags,
	    const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.mount    = pfmfs_mount,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;
/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	unsigned int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	unsigned int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}
767 pfm_get_new_msg(pfm_context_t
*ctx
)
771 next
= (ctx
->ctx_msgq_tail
+1) % PFM_MAX_MSGS
;
773 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx
, ctx
->ctx_msgq_head
, ctx
->ctx_msgq_tail
));
774 if (next
== ctx
->ctx_msgq_head
) return NULL
;
776 idx
= ctx
->ctx_msgq_tail
;
777 ctx
->ctx_msgq_tail
= next
;
779 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx
, ctx
->ctx_msgq_head
, ctx
->ctx_msgq_tail
, idx
));
781 return ctx
->ctx_msgq
+idx
;
785 pfm_get_next_msg(pfm_context_t
*ctx
)
789 DPRINT(("ctx=%p head=%d tail=%d\n", ctx
, ctx
->ctx_msgq_head
, ctx
->ctx_msgq_tail
));
791 if (PFM_CTXQ_EMPTY(ctx
)) return NULL
;
796 msg
= ctx
->ctx_msgq
+ctx
->ctx_msgq_head
;
801 ctx
->ctx_msgq_head
= (ctx
->ctx_msgq_head
+1) % PFM_MAX_MSGS
;
803 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx
, ctx
->ctx_msgq_head
, ctx
->ctx_msgq_tail
, msg
->pfm_gen_msg
.msg_type
));
809 pfm_reset_msgq(pfm_context_t
*ctx
)
811 ctx
->ctx_msgq_head
= ctx
->ctx_msgq_tail
= 0;
812 DPRINT(("ctx=%p msgq reset\n", ctx
));
816 pfm_rvmalloc(unsigned long size
)
821 size
= PAGE_ALIGN(size
);
824 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
825 addr
= (unsigned long)mem
;
827 pfm_reserve_page(addr
);
836 pfm_rvfree(void *mem
, unsigned long size
)
841 DPRINT(("freeing physical buffer @%p size=%lu\n", mem
, size
));
842 addr
= (unsigned long) mem
;
843 while ((long) size
> 0) {
844 pfm_unreserve_page(addr
);
853 static pfm_context_t
*
854 pfm_context_alloc(int ctx_flags
)
859 * allocate context descriptor
860 * must be able to free with interrupts disabled
862 ctx
= kzalloc(sizeof(pfm_context_t
), GFP_KERNEL
);
864 DPRINT(("alloc ctx @%p\n", ctx
));
867 * init context protection lock
869 spin_lock_init(&ctx
->ctx_lock
);
872 * context is unloaded
874 ctx
->ctx_state
= PFM_CTX_UNLOADED
;
877 * initialization of context's flags
879 ctx
->ctx_fl_block
= (ctx_flags
& PFM_FL_NOTIFY_BLOCK
) ? 1 : 0;
880 ctx
->ctx_fl_system
= (ctx_flags
& PFM_FL_SYSTEM_WIDE
) ? 1: 0;
881 ctx
->ctx_fl_no_msg
= (ctx_flags
& PFM_FL_OVFL_NO_MSG
) ? 1: 0;
883 * will move to set properties
884 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
888 * init restart semaphore to locked
890 init_completion(&ctx
->ctx_restart_done
);
893 * activation is used in SMP only
895 ctx
->ctx_last_activation
= PFM_INVALID_ACTIVATION
;
896 SET_LAST_CPU(ctx
, -1);
899 * initialize notification message queue
901 ctx
->ctx_msgq_head
= ctx
->ctx_msgq_tail
= 0;
902 init_waitqueue_head(&ctx
->ctx_msgq_wait
);
903 init_waitqueue_head(&ctx
->ctx_zombieq
);
910 pfm_context_free(pfm_context_t
*ctx
)
913 DPRINT(("free ctx @%p\n", ctx
));
919 pfm_mask_monitoring(struct task_struct
*task
)
921 pfm_context_t
*ctx
= PFM_GET_CTX(task
);
922 unsigned long mask
, val
, ovfl_mask
;
925 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task
)));
927 ovfl_mask
= pmu_conf
->ovfl_val
;
929 * monitoring can only be masked as a result of a valid
930 * counter overflow. In UP, it means that the PMU still
931 * has an owner. Note that the owner can be different
932 * from the current task. However the PMU state belongs
934 * In SMP, a valid overflow only happens when task is
935 * current. Therefore if we come here, we know that
936 * the PMU state belongs to the current task, therefore
937 * we can access the live registers.
939 * So in both cases, the live register contains the owner's
940 * state. We can ONLY touch the PMU registers and NOT the PSR.
942 * As a consequence to this call, the ctx->th_pmds[] array
943 * contains stale information which must be ignored
944 * when context is reloaded AND monitoring is active (see
947 mask
= ctx
->ctx_used_pmds
[0];
948 for (i
= 0; mask
; i
++, mask
>>=1) {
949 /* skip non used pmds */
950 if ((mask
& 0x1) == 0) continue;
951 val
= ia64_get_pmd(i
);
953 if (PMD_IS_COUNTING(i
)) {
955 * we rebuild the full 64 bit value of the counter
957 ctx
->ctx_pmds
[i
].val
+= (val
& ovfl_mask
);
959 ctx
->ctx_pmds
[i
].val
= val
;
961 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
963 ctx
->ctx_pmds
[i
].val
,
967 * mask monitoring by setting the privilege level to 0
968 * we cannot use psr.pp/psr.up for this, it is controlled by
971 * if task is current, modify actual registers, otherwise modify
972 * thread save state, i.e., what will be restored in pfm_load_regs()
974 mask
= ctx
->ctx_used_monitors
[0] >> PMU_FIRST_COUNTER
;
975 for(i
= PMU_FIRST_COUNTER
; mask
; i
++, mask
>>=1) {
976 if ((mask
& 0x1) == 0UL) continue;
977 ia64_set_pmc(i
, ctx
->th_pmcs
[i
] & ~0xfUL
);
978 ctx
->th_pmcs
[i
] &= ~0xfUL
;
979 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i
, ctx
->th_pmcs
[i
]));
982 * make all of this visible
988 * must always be done with task == current
990 * context must be in MASKED state when calling
993 pfm_restore_monitoring(struct task_struct
*task
)
995 pfm_context_t
*ctx
= PFM_GET_CTX(task
);
996 unsigned long mask
, ovfl_mask
;
997 unsigned long psr
, val
;
1000 is_system
= ctx
->ctx_fl_system
;
1001 ovfl_mask
= pmu_conf
->ovfl_val
;
1003 if (task
!= current
) {
1004 printk(KERN_ERR
"perfmon.%d: invalid task[%d] current[%d]\n", __LINE__
, task_pid_nr(task
), task_pid_nr(current
));
1007 if (ctx
->ctx_state
!= PFM_CTX_MASKED
) {
1008 printk(KERN_ERR
"perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__
,
1009 task_pid_nr(task
), task_pid_nr(current
), ctx
->ctx_state
);
1012 psr
= pfm_get_psr();
1014 * monitoring is masked via the PMC.
1015 * As we restore their value, we do not want each counter to
1016 * restart right away. We stop monitoring using the PSR,
1017 * restore the PMC (and PMD) and then re-establish the psr
1018 * as it was. Note that there can be no pending overflow at
1019 * this point, because monitoring was MASKED.
1021 * system-wide session are pinned and self-monitoring
1023 if (is_system
&& (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP
)) {
1024 /* disable dcr pp */
1025 ia64_setreg(_IA64_REG_CR_DCR
, ia64_getreg(_IA64_REG_CR_DCR
) & ~IA64_DCR_PP
);
1031 * first, we restore the PMD
1033 mask
= ctx
->ctx_used_pmds
[0];
1034 for (i
= 0; mask
; i
++, mask
>>=1) {
1035 /* skip non used pmds */
1036 if ((mask
& 0x1) == 0) continue;
1038 if (PMD_IS_COUNTING(i
)) {
1040 * we split the 64bit value according to
1043 val
= ctx
->ctx_pmds
[i
].val
& ovfl_mask
;
1044 ctx
->ctx_pmds
[i
].val
&= ~ovfl_mask
;
1046 val
= ctx
->ctx_pmds
[i
].val
;
1048 ia64_set_pmd(i
, val
);
1050 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1052 ctx
->ctx_pmds
[i
].val
,
1058 mask
= ctx
->ctx_used_monitors
[0] >> PMU_FIRST_COUNTER
;
1059 for(i
= PMU_FIRST_COUNTER
; mask
; i
++, mask
>>=1) {
1060 if ((mask
& 0x1) == 0UL) continue;
1061 ctx
->th_pmcs
[i
] = ctx
->ctx_pmcs
[i
];
1062 ia64_set_pmc(i
, ctx
->th_pmcs
[i
]);
1063 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1064 task_pid_nr(task
), i
, ctx
->th_pmcs
[i
]));
1069 * must restore DBR/IBR because could be modified while masked
1070 * XXX: need to optimize
1072 if (ctx
->ctx_fl_using_dbreg
) {
1073 pfm_restore_ibrs(ctx
->ctx_ibrs
, pmu_conf
->num_ibrs
);
1074 pfm_restore_dbrs(ctx
->ctx_dbrs
, pmu_conf
->num_dbrs
);
1080 if (is_system
&& (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP
)) {
1082 ia64_setreg(_IA64_REG_CR_DCR
, ia64_getreg(_IA64_REG_CR_DCR
) | IA64_DCR_PP
);
1089 pfm_save_pmds(unsigned long *pmds
, unsigned long mask
)
1095 for (i
=0; mask
; i
++, mask
>>=1) {
1096 if (mask
& 0x1) pmds
[i
] = ia64_get_pmd(i
);
1101 * reload from thread state (used for ctxw only)
1104 pfm_restore_pmds(unsigned long *pmds
, unsigned long mask
)
1107 unsigned long val
, ovfl_val
= pmu_conf
->ovfl_val
;
1109 for (i
=0; mask
; i
++, mask
>>=1) {
1110 if ((mask
& 0x1) == 0) continue;
1111 val
= PMD_IS_COUNTING(i
) ? pmds
[i
] & ovfl_val
: pmds
[i
];
1112 ia64_set_pmd(i
, val
);
1118 * propagate PMD from context to thread-state
1121 pfm_copy_pmds(struct task_struct
*task
, pfm_context_t
*ctx
)
1123 unsigned long ovfl_val
= pmu_conf
->ovfl_val
;
1124 unsigned long mask
= ctx
->ctx_all_pmds
[0];
1128 DPRINT(("mask=0x%lx\n", mask
));
1130 for (i
=0; mask
; i
++, mask
>>=1) {
1132 val
= ctx
->ctx_pmds
[i
].val
;
1135 * We break up the 64 bit value into 2 pieces
1136 * the lower bits go to the machine state in the
1137 * thread (will be reloaded on ctxsw in).
1138 * The upper part stays in the soft-counter.
1140 if (PMD_IS_COUNTING(i
)) {
1141 ctx
->ctx_pmds
[i
].val
= val
& ~ovfl_val
;
1144 ctx
->th_pmds
[i
] = val
;
1146 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1149 ctx
->ctx_pmds
[i
].val
));
1154 * propagate PMC from context to thread-state
1157 pfm_copy_pmcs(struct task_struct
*task
, pfm_context_t
*ctx
)
1159 unsigned long mask
= ctx
->ctx_all_pmcs
[0];
1162 DPRINT(("mask=0x%lx\n", mask
));
1164 for (i
=0; mask
; i
++, mask
>>=1) {
1165 /* masking 0 with ovfl_val yields 0 */
1166 ctx
->th_pmcs
[i
] = ctx
->ctx_pmcs
[i
];
1167 DPRINT(("pmc[%d]=0x%lx\n", i
, ctx
->th_pmcs
[i
]));
1174 pfm_restore_pmcs(unsigned long *pmcs
, unsigned long mask
)
1178 for (i
=0; mask
; i
++, mask
>>=1) {
1179 if ((mask
& 0x1) == 0) continue;
1180 ia64_set_pmc(i
, pmcs
[i
]);
1186 pfm_uuid_cmp(pfm_uuid_t a
, pfm_uuid_t b
)
1188 return memcmp(a
, b
, sizeof(pfm_uuid_t
));
1192 pfm_buf_fmt_exit(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, void *buf
, struct pt_regs
*regs
)
1195 if (fmt
->fmt_exit
) ret
= (*fmt
->fmt_exit
)(task
, buf
, regs
);
1200 pfm_buf_fmt_getsize(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, unsigned int flags
, int cpu
, void *arg
, unsigned long *size
)
1203 if (fmt
->fmt_getsize
) ret
= (*fmt
->fmt_getsize
)(task
, flags
, cpu
, arg
, size
);
1209 pfm_buf_fmt_validate(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, unsigned int flags
,
1213 if (fmt
->fmt_validate
) ret
= (*fmt
->fmt_validate
)(task
, flags
, cpu
, arg
);
1218 pfm_buf_fmt_init(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, void *buf
, unsigned int flags
,
1222 if (fmt
->fmt_init
) ret
= (*fmt
->fmt_init
)(task
, buf
, flags
, cpu
, arg
);
1227 pfm_buf_fmt_restart(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, pfm_ovfl_ctrl_t
*ctrl
, void *buf
, struct pt_regs
*regs
)
1230 if (fmt
->fmt_restart
) ret
= (*fmt
->fmt_restart
)(task
, ctrl
, buf
, regs
);
1235 pfm_buf_fmt_restart_active(pfm_buffer_fmt_t
*fmt
, struct task_struct
*task
, pfm_ovfl_ctrl_t
*ctrl
, void *buf
, struct pt_regs
*regs
)
1238 if (fmt
->fmt_restart_active
) ret
= (*fmt
->fmt_restart_active
)(task
, ctrl
, buf
, regs
);
1242 static pfm_buffer_fmt_t
*
1243 __pfm_find_buffer_fmt(pfm_uuid_t uuid
)
1245 struct list_head
* pos
;
1246 pfm_buffer_fmt_t
* entry
;
1248 list_for_each(pos
, &pfm_buffer_fmt_list
) {
1249 entry
= list_entry(pos
, pfm_buffer_fmt_t
, fmt_list
);
1250 if (pfm_uuid_cmp(uuid
, entry
->fmt_uuid
) == 0)
1257 * find a buffer format based on its uuid
1259 static pfm_buffer_fmt_t
*
1260 pfm_find_buffer_fmt(pfm_uuid_t uuid
)
1262 pfm_buffer_fmt_t
* fmt
;
1263 spin_lock(&pfm_buffer_fmt_lock
);
1264 fmt
= __pfm_find_buffer_fmt(uuid
);
1265 spin_unlock(&pfm_buffer_fmt_lock
);
1270 pfm_register_buffer_fmt(pfm_buffer_fmt_t
*fmt
)
1274 /* some sanity checks */
1275 if (fmt
== NULL
|| fmt
->fmt_name
== NULL
) return -EINVAL
;
1277 /* we need at least a handler */
1278 if (fmt
->fmt_handler
== NULL
) return -EINVAL
;
1281 * XXX: need check validity of fmt_arg_size
1284 spin_lock(&pfm_buffer_fmt_lock
);
1286 if (__pfm_find_buffer_fmt(fmt
->fmt_uuid
)) {
1287 printk(KERN_ERR
"perfmon: duplicate sampling format: %s\n", fmt
->fmt_name
);
1291 list_add(&fmt
->fmt_list
, &pfm_buffer_fmt_list
);
1292 printk(KERN_INFO
"perfmon: added sampling format %s\n", fmt
->fmt_name
);
1295 spin_unlock(&pfm_buffer_fmt_lock
);
1298 EXPORT_SYMBOL(pfm_register_buffer_fmt
);
1301 pfm_unregister_buffer_fmt(pfm_uuid_t uuid
)
1303 pfm_buffer_fmt_t
*fmt
;
1306 spin_lock(&pfm_buffer_fmt_lock
);
1308 fmt
= __pfm_find_buffer_fmt(uuid
);
1310 printk(KERN_ERR
"perfmon: cannot unregister format, not found\n");
1314 list_del_init(&fmt
->fmt_list
);
1315 printk(KERN_INFO
"perfmon: removed sampling format: %s\n", fmt
->fmt_name
);
1318 spin_unlock(&pfm_buffer_fmt_lock
);
1322 EXPORT_SYMBOL(pfm_unregister_buffer_fmt
);
1324 extern void update_pal_halt_status(int);
1327 pfm_reserve_session(struct task_struct
*task
, int is_syswide
, unsigned int cpu
)
1329 unsigned long flags
;
1331 * validity checks on cpu_mask have been done upstream
1335 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1336 pfm_sessions
.pfs_sys_sessions
,
1337 pfm_sessions
.pfs_task_sessions
,
1338 pfm_sessions
.pfs_sys_use_dbregs
,
1344 * cannot mix system wide and per-task sessions
1346 if (pfm_sessions
.pfs_task_sessions
> 0UL) {
1347 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1348 pfm_sessions
.pfs_task_sessions
));
1352 if (pfm_sessions
.pfs_sys_session
[cpu
]) goto error_conflict
;
1354 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu
, smp_processor_id()));
1356 pfm_sessions
.pfs_sys_session
[cpu
] = task
;
1358 pfm_sessions
.pfs_sys_sessions
++ ;
1361 if (pfm_sessions
.pfs_sys_sessions
) goto abort
;
1362 pfm_sessions
.pfs_task_sessions
++;
1365 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1366 pfm_sessions
.pfs_sys_sessions
,
1367 pfm_sessions
.pfs_task_sessions
,
1368 pfm_sessions
.pfs_sys_use_dbregs
,
1373 * disable default_idle() to go to PAL_HALT
1375 update_pal_halt_status(0);
1382 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1383 task_pid_nr(pfm_sessions
.pfs_sys_session
[cpu
]),
1393 pfm_unreserve_session(pfm_context_t
*ctx
, int is_syswide
, unsigned int cpu
)
1395 unsigned long flags
;
1397 * validity checks on cpu_mask have been done upstream
1401 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1402 pfm_sessions
.pfs_sys_sessions
,
1403 pfm_sessions
.pfs_task_sessions
,
1404 pfm_sessions
.pfs_sys_use_dbregs
,
1410 pfm_sessions
.pfs_sys_session
[cpu
] = NULL
;
1412 * would not work with perfmon+more than one bit in cpu_mask
1414 if (ctx
&& ctx
->ctx_fl_using_dbreg
) {
1415 if (pfm_sessions
.pfs_sys_use_dbregs
== 0) {
1416 printk(KERN_ERR
"perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx
);
1418 pfm_sessions
.pfs_sys_use_dbregs
--;
1421 pfm_sessions
.pfs_sys_sessions
--;
1423 pfm_sessions
.pfs_task_sessions
--;
1425 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1426 pfm_sessions
.pfs_sys_sessions
,
1427 pfm_sessions
.pfs_task_sessions
,
1428 pfm_sessions
.pfs_sys_use_dbregs
,
1433 * if possible, enable default_idle() to go into PAL_HALT
1435 if (pfm_sessions
.pfs_task_sessions
== 0 && pfm_sessions
.pfs_sys_sessions
== 0)
1436 update_pal_halt_status(1);
1444 * removes virtual mapping of the sampling buffer.
1445 * IMPORTANT: cannot be called with interrupts disable, e.g. inside
1446 * a PROTECT_CTX() section.
1449 pfm_remove_smpl_mapping(void *vaddr
, unsigned long size
)
1451 struct task_struct
*task
= current
;
1455 if (task
->mm
== NULL
|| size
== 0UL || vaddr
== NULL
) {
1456 printk(KERN_ERR
"perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task
), task
->mm
);
1460 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr
, size
));
1463 * does the actual unmapping
1465 r
= vm_munmap((unsigned long)vaddr
, size
);
1468 printk(KERN_ERR
"perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task
), vaddr
, size
);
1471 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr
, size
, r
));
1477 * free actual physical storage used by sampling buffer
1481 pfm_free_smpl_buffer(pfm_context_t
*ctx
)
1483 pfm_buffer_fmt_t
*fmt
;
1485 if (ctx
->ctx_smpl_hdr
== NULL
) goto invalid_free
;
1488 * we won't use the buffer format anymore
1490 fmt
= ctx
->ctx_buf_fmt
;
1492 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1495 ctx
->ctx_smpl_vaddr
));
1497 pfm_buf_fmt_exit(fmt
, current
, NULL
, NULL
);
1502 pfm_rvfree(ctx
->ctx_smpl_hdr
, ctx
->ctx_smpl_size
);
1504 ctx
->ctx_smpl_hdr
= NULL
;
1505 ctx
->ctx_smpl_size
= 0UL;
1510 printk(KERN_ERR
"perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current
));
1516 pfm_exit_smpl_buffer(pfm_buffer_fmt_t
*fmt
)
1518 if (fmt
== NULL
) return;
1520 pfm_buf_fmt_exit(fmt
, current
, NULL
, NULL
);
1525 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1526 * no real gain from having the whole whorehouse mounted. So we don't need
1527 * any operations on the root directory. However, we need a non-trivial
1528 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1530 static struct vfsmount
*pfmfs_mnt __read_mostly
;
1535 int err
= register_filesystem(&pfm_fs_type
);
1537 pfmfs_mnt
= kern_mount(&pfm_fs_type
);
1538 err
= PTR_ERR(pfmfs_mnt
);
1539 if (IS_ERR(pfmfs_mnt
))
1540 unregister_filesystem(&pfm_fs_type
);
1548 pfm_read(struct file
*filp
, char __user
*buf
, size_t size
, loff_t
*ppos
)
1553 unsigned long flags
;
1554 DECLARE_WAITQUEUE(wait
, current
);
1555 if (PFM_IS_FILE(filp
) == 0) {
1556 printk(KERN_ERR
"perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current
));
1560 ctx
= filp
->private_data
;
1562 printk(KERN_ERR
"perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current
));
1567 * check even when there is no message
1569 if (size
< sizeof(pfm_msg_t
)) {
1570 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx
, sizeof(pfm_msg_t
)));
1574 PROTECT_CTX(ctx
, flags
);
1577 * put ourselves on the wait queue
1579 add_wait_queue(&ctx
->ctx_msgq_wait
, &wait
);
1587 set_current_state(TASK_INTERRUPTIBLE
);
1589 DPRINT(("head=%d tail=%d\n", ctx
->ctx_msgq_head
, ctx
->ctx_msgq_tail
));
1592 if(PFM_CTXQ_EMPTY(ctx
) == 0) break;
1594 UNPROTECT_CTX(ctx
, flags
);
1597 * check non-blocking read
1600 if(filp
->f_flags
& O_NONBLOCK
) break;
1603 * check pending signals
1605 if(signal_pending(current
)) {
1610 * no message, so wait
1614 PROTECT_CTX(ctx
, flags
);
1616 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current
), ret
));
1617 set_current_state(TASK_RUNNING
);
1618 remove_wait_queue(&ctx
->ctx_msgq_wait
, &wait
);
1620 if (ret
< 0) goto abort
;
1623 msg
= pfm_get_next_msg(ctx
);
1625 printk(KERN_ERR
"perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx
, task_pid_nr(current
));
1629 DPRINT(("fd=%d type=%d\n", msg
->pfm_gen_msg
.msg_ctx_fd
, msg
->pfm_gen_msg
.msg_type
));
1632 if(copy_to_user(buf
, msg
, sizeof(pfm_msg_t
)) == 0) ret
= sizeof(pfm_msg_t
);
1635 UNPROTECT_CTX(ctx
, flags
);
1641 pfm_write(struct file
*file
, const char __user
*ubuf
,
1642 size_t size
, loff_t
*ppos
)
1644 DPRINT(("pfm_write called\n"));
1649 pfm_poll(struct file
*filp
, poll_table
* wait
)
1652 unsigned long flags
;
1653 unsigned int mask
= 0;
1655 if (PFM_IS_FILE(filp
) == 0) {
1656 printk(KERN_ERR
"perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current
));
1660 ctx
= filp
->private_data
;
1662 printk(KERN_ERR
"perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current
));
1667 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx
->ctx_fd
));
1669 poll_wait(filp
, &ctx
->ctx_msgq_wait
, wait
);
1671 PROTECT_CTX(ctx
, flags
);
1673 if (PFM_CTXQ_EMPTY(ctx
) == 0)
1674 mask
= POLLIN
| POLLRDNORM
;
1676 UNPROTECT_CTX(ctx
, flags
);
1678 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx
->ctx_fd
, mask
));
1684 pfm_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
1686 DPRINT(("pfm_ioctl called\n"));
1691 * interrupt cannot be masked when coming here
1694 pfm_do_fasync(int fd
, struct file
*filp
, pfm_context_t
*ctx
, int on
)
1698 ret
= fasync_helper (fd
, filp
, on
, &ctx
->ctx_async_queue
);
1700 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1701 task_pid_nr(current
),
1704 ctx
->ctx_async_queue
, ret
));
1710 pfm_fasync(int fd
, struct file
*filp
, int on
)
1715 if (PFM_IS_FILE(filp
) == 0) {
1716 printk(KERN_ERR
"perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current
));
1720 ctx
= filp
->private_data
;
1722 printk(KERN_ERR
"perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current
));
1726 * we cannot mask interrupts during this call because this may
1727 * may go to sleep if memory is not readily avalaible.
1729 * We are protected from the conetxt disappearing by the get_fd()/put_fd()
1730 * done in caller. Serialization of this function is ensured by caller.
1732 ret
= pfm_do_fasync(fd
, filp
, ctx
, on
);
1735 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1738 ctx
->ctx_async_queue
, ret
));
1745 * this function is exclusively called from pfm_close().
1746 * The context is not protected at that time, nor are interrupts
1747 * on the remote CPU. That's necessary to avoid deadlocks.
1750 pfm_syswide_force_stop(void *info
)
1752 pfm_context_t
*ctx
= (pfm_context_t
*)info
;
1753 struct pt_regs
*regs
= task_pt_regs(current
);
1754 struct task_struct
*owner
;
1755 unsigned long flags
;
1758 if (ctx
->ctx_cpu
!= smp_processor_id()) {
1759 printk(KERN_ERR
"perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1761 smp_processor_id());
1764 owner
= GET_PMU_OWNER();
1765 if (owner
!= ctx
->ctx_task
) {
1766 printk(KERN_ERR
"perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1768 task_pid_nr(owner
), task_pid_nr(ctx
->ctx_task
));
1771 if (GET_PMU_CTX() != ctx
) {
1772 printk(KERN_ERR
"perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1774 GET_PMU_CTX(), ctx
);
1778 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx
->ctx_task
)));
1780 * the context is already protected in pfm_close(), we simply
1781 * need to mask interrupts to avoid a PMU interrupt race on
1784 local_irq_save(flags
);
1786 ret
= pfm_context_unload(ctx
, NULL
, 0, regs
);
1788 DPRINT(("context_unload returned %d\n", ret
));
1792 * unmask interrupts, PMU interrupts are now spurious here
1794 local_irq_restore(flags
);
1798 pfm_syswide_cleanup_other_cpu(pfm_context_t
*ctx
)
1802 DPRINT(("calling CPU%d for cleanup\n", ctx
->ctx_cpu
));
1803 ret
= smp_call_function_single(ctx
->ctx_cpu
, pfm_syswide_force_stop
, ctx
, 1);
1804 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx
->ctx_cpu
, ret
));
1806 #endif /* CONFIG_SMP */
1809 * called for each close(). Partially free resources.
1810 * When caller is self-monitoring, the context is unloaded.
1813 pfm_flush(struct file
*filp
, fl_owner_t id
)
1816 struct task_struct
*task
;
1817 struct pt_regs
*regs
;
1818 unsigned long flags
;
1819 unsigned long smpl_buf_size
= 0UL;
1820 void *smpl_buf_vaddr
= NULL
;
1821 int state
, is_system
;
1823 if (PFM_IS_FILE(filp
) == 0) {
1824 DPRINT(("bad magic for\n"));
1828 ctx
= filp
->private_data
;
1830 printk(KERN_ERR
"perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current
));
1835 * remove our file from the async queue, if we use this mode.
1836 * This can be done without the context being protected. We come
1837 * here when the context has become unreachable by other tasks.
1839 * We may still have active monitoring at this point and we may
1840 * end up in pfm_overflow_handler(). However, fasync_helper()
1841 * operates with interrupts disabled and it cleans up the
1842 * queue. If the PMU handler is called prior to entering
1843 * fasync_helper() then it will send a signal. If it is
1844 * invoked after, it will find an empty queue and no
1845 * signal will be sent. In both case, we are safe
1847 PROTECT_CTX(ctx
, flags
);
1849 state
= ctx
->ctx_state
;
1850 is_system
= ctx
->ctx_fl_system
;
1852 task
= PFM_CTX_TASK(ctx
);
1853 regs
= task_pt_regs(task
);
1855 DPRINT(("ctx_state=%d is_current=%d\n",
1857 task
== current
? 1 : 0));
1860 * if state == UNLOADED, then task is NULL
1864 * we must stop and unload because we are losing access to the context.
1866 if (task
== current
) {
1869 * the task IS the owner but it migrated to another CPU: that's bad
1870 * but we must handle this cleanly. Unfortunately, the kernel does
1871 * not provide a mechanism to block migration (while the context is loaded).
1873 * We need to release the resource on the ORIGINAL cpu.
1875 if (is_system
&& ctx
->ctx_cpu
!= smp_processor_id()) {
1877 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
1879 * keep context protected but unmask interrupt for IPI
1881 local_irq_restore(flags
);
1883 pfm_syswide_cleanup_other_cpu(ctx
);
1886 * restore interrupt masking
1888 local_irq_save(flags
);
1891 * context is unloaded at this point
1894 #endif /* CONFIG_SMP */
1897 DPRINT(("forcing unload\n"));
1899 * stop and unload, returning with state UNLOADED
1900 * and session unreserved.
1902 pfm_context_unload(ctx
, NULL
, 0, regs
);
1904 DPRINT(("ctx_state=%d\n", ctx
->ctx_state
));
1909 * remove virtual mapping, if any, for the calling task.
1910 * cannot reset ctx field until last user is calling close().
1912 * ctx_smpl_vaddr must never be cleared because it is needed
1913 * by every task with access to the context
1915 * When called from do_exit(), the mm context is gone already, therefore
1916 * mm is NULL, i.e., the VMA is already gone and we do not have to
1919 if (ctx
->ctx_smpl_vaddr
&& current
->mm
) {
1920 smpl_buf_vaddr
= ctx
->ctx_smpl_vaddr
;
1921 smpl_buf_size
= ctx
->ctx_smpl_size
;
1924 UNPROTECT_CTX(ctx
, flags
);
1927 * if there was a mapping, then we systematically remove it
1928 * at this point. Cannot be done inside critical section
1929 * because some VM function reenables interrupts.
1932 if (smpl_buf_vaddr
) pfm_remove_smpl_mapping(smpl_buf_vaddr
, smpl_buf_size
);
1937 * called either on explicit close() or from exit_files().
1938 * Only the LAST user of the file gets to this point, i.e., it is
1941 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
1942 * (fput()),i.e, last task to access the file. Nobody else can access the
1943 * file at this point.
1945 * When called from exit_files(), the VMA has been freed because exit_mm()
1946 * is executed before exit_files().
1948 * When called from exit_files(), the current task is not yet ZOMBIE but we
1949 * flush the PMU state to the context.
1952 pfm_close(struct inode
*inode
, struct file
*filp
)
1955 struct task_struct
*task
;
1956 struct pt_regs
*regs
;
1957 DECLARE_WAITQUEUE(wait
, current
);
1958 unsigned long flags
;
1959 unsigned long smpl_buf_size
= 0UL;
1960 void *smpl_buf_addr
= NULL
;
1961 int free_possible
= 1;
1962 int state
, is_system
;
1964 DPRINT(("pfm_close called private=%p\n", filp
->private_data
));
1966 if (PFM_IS_FILE(filp
) == 0) {
1967 DPRINT(("bad magic\n"));
1971 ctx
= filp
->private_data
;
1973 printk(KERN_ERR
"perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current
));
1977 PROTECT_CTX(ctx
, flags
);
1979 state
= ctx
->ctx_state
;
1980 is_system
= ctx
->ctx_fl_system
;
1982 task
= PFM_CTX_TASK(ctx
);
1983 regs
= task_pt_regs(task
);
1985 DPRINT(("ctx_state=%d is_current=%d\n",
1987 task
== current
? 1 : 0));
1990 * if task == current, then pfm_flush() unloaded the context
1992 if (state
== PFM_CTX_UNLOADED
) goto doit
;
1995 * context is loaded/masked and task != current, we need to
1996 * either force an unload or go zombie
2000 * The task is currently blocked or will block after an overflow.
2001 * we must force it to wakeup to get out of the
2002 * MASKED state and transition to the unloaded state by itself.
2004 * This situation is only possible for per-task mode
2006 if (state
== PFM_CTX_MASKED
&& CTX_OVFL_NOBLOCK(ctx
) == 0) {
2009 * set a "partial" zombie state to be checked
2010 * upon return from down() in pfm_handle_work().
2012 * We cannot use the ZOMBIE state, because it is checked
2013 * by pfm_load_regs() which is called upon wakeup from down().
2014 * In such case, it would free the context and then we would
2015 * return to pfm_handle_work() which would access the
2016 * stale context. Instead, we set a flag invisible to pfm_load_regs()
2017 * but visible to pfm_handle_work().
2019 * For some window of time, we have a zombie context with
2020 * ctx_state = MASKED and not ZOMBIE
2022 ctx
->ctx_fl_going_zombie
= 1;
2025 * force task to wake up from MASKED state
2027 complete(&ctx
->ctx_restart_done
);
2029 DPRINT(("waking up ctx_state=%d\n", state
));
2032 * put ourself to sleep waiting for the other
2033 * task to report completion
2035 * the context is protected by mutex, therefore there
2036 * is no risk of being notified of completion before
2037 * begin actually on the waitq.
2039 set_current_state(TASK_INTERRUPTIBLE
);
2040 add_wait_queue(&ctx
->ctx_zombieq
, &wait
);
2042 UNPROTECT_CTX(ctx
, flags
);
2045 * XXX: check for signals :
2046 * - ok for explicit close
2047 * - not ok when coming from exit_files()
2052 PROTECT_CTX(ctx
, flags
);
2055 remove_wait_queue(&ctx
->ctx_zombieq
, &wait
);
2056 set_current_state(TASK_RUNNING
);
2059 * context is unloaded at this point
2061 DPRINT(("after zombie wakeup ctx_state=%d for\n", state
));
2063 else if (task
!= current
) {
2066 * switch context to zombie state
2068 ctx
->ctx_state
= PFM_CTX_ZOMBIE
;
2070 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task
)));
2072 * cannot free the context on the spot. deferred until
2073 * the task notices the ZOMBIE state
2077 pfm_context_unload(ctx
, NULL
, 0, regs
);
2082 /* reload state, may have changed during opening of critical section */
2083 state
= ctx
->ctx_state
;
2086 * the context is still attached to a task (possibly current)
2087 * we cannot destroy it right now
2091 * we must free the sampling buffer right here because
2092 * we cannot rely on it being cleaned up later by the
2093 * monitored task. It is not possible to free vmalloc'ed
2094 * memory in pfm_load_regs(). Instead, we remove the buffer
2095 * now. should there be subsequent PMU overflow originally
2096 * meant for sampling, the will be converted to spurious
2097 * and that's fine because the monitoring tools is gone anyway.
2099 if (ctx
->ctx_smpl_hdr
) {
2100 smpl_buf_addr
= ctx
->ctx_smpl_hdr
;
2101 smpl_buf_size
= ctx
->ctx_smpl_size
;
2102 /* no more sampling */
2103 ctx
->ctx_smpl_hdr
= NULL
;
2104 ctx
->ctx_fl_is_sampling
= 0;
2107 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2113 if (smpl_buf_addr
) pfm_exit_smpl_buffer(ctx
->ctx_buf_fmt
);
2116 * UNLOADED that the session has already been unreserved.
2118 if (state
== PFM_CTX_ZOMBIE
) {
2119 pfm_unreserve_session(ctx
, ctx
->ctx_fl_system
, ctx
->ctx_cpu
);
2123 * disconnect file descriptor from context must be done
2126 filp
->private_data
= NULL
;
2129 * if we free on the spot, the context is now completely unreachable
2130 * from the callers side. The monitored task side is also cut, so we
2133 * If we have a deferred free, only the caller side is disconnected.
2135 UNPROTECT_CTX(ctx
, flags
);
2138 * All memory free operations (especially for vmalloc'ed memory)
2139 * MUST be done with interrupts ENABLED.
2141 if (smpl_buf_addr
) pfm_rvfree(smpl_buf_addr
, smpl_buf_size
);
2144 * return the memory used by the context
2146 if (free_possible
) pfm_context_free(ctx
);
2152 pfm_no_open(struct inode
*irrelevant
, struct file
*dontcare
)
2154 DPRINT(("pfm_no_open called\n"));
2160 static const struct file_operations pfm_file_ops
= {
2161 .llseek
= no_llseek
,
2165 .unlocked_ioctl
= pfm_ioctl
,
2166 .open
= pfm_no_open
, /* special open code to disallow open via /proc */
2167 .fasync
= pfm_fasync
,
2168 .release
= pfm_close
,
2173 pfmfs_delete_dentry(const struct dentry
*dentry
)
2178 static char *pfmfs_dname(struct dentry
*dentry
, char *buffer
, int buflen
)
2180 return dynamic_dname(dentry
, buffer
, buflen
, "pfm:[%lu]",
2181 dentry
->d_inode
->i_ino
);
2184 static const struct dentry_operations pfmfs_dentry_operations
= {
2185 .d_delete
= pfmfs_delete_dentry
,
2186 .d_dname
= pfmfs_dname
,
2190 static struct file
*
2191 pfm_alloc_file(pfm_context_t
*ctx
)
2194 struct inode
*inode
;
2196 struct qstr
this = { .name
= "" };
2199 * allocate a new inode
2201 inode
= new_inode(pfmfs_mnt
->mnt_sb
);
2203 return ERR_PTR(-ENOMEM
);
2205 DPRINT(("new inode ino=%ld @%p\n", inode
->i_ino
, inode
));
2207 inode
->i_mode
= S_IFCHR
|S_IRUGO
;
2208 inode
->i_uid
= current_fsuid();
2209 inode
->i_gid
= current_fsgid();
2212 * allocate a new dcache entry
2214 path
.dentry
= d_alloc(pfmfs_mnt
->mnt_root
, &this);
2217 return ERR_PTR(-ENOMEM
);
2219 path
.mnt
= mntget(pfmfs_mnt
);
2221 d_add(path
.dentry
, inode
);
2223 file
= alloc_file(&path
, FMODE_READ
, &pfm_file_ops
);
2226 return ERR_PTR(-ENFILE
);
2229 file
->f_flags
= O_RDONLY
;
2230 file
->private_data
= ctx
;
2236 pfm_remap_buffer(struct vm_area_struct
*vma
, unsigned long buf
, unsigned long addr
, unsigned long size
)
2238 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf
, addr
, size
));
2241 unsigned long pfn
= ia64_tpa(buf
) >> PAGE_SHIFT
;
2244 if (remap_pfn_range(vma
, addr
, pfn
, PAGE_SIZE
, PAGE_READONLY
))
2255 * allocate a sampling buffer and remaps it into the user address space of the task
2258 pfm_smpl_buffer_alloc(struct task_struct
*task
, struct file
*filp
, pfm_context_t
*ctx
, unsigned long rsize
, void **user_vaddr
)
2260 struct mm_struct
*mm
= task
->mm
;
2261 struct vm_area_struct
*vma
= NULL
;
2267 * the fixed header + requested size and align to page boundary
2269 size
= PAGE_ALIGN(rsize
);
2271 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize
, size
));
2274 * check requested size to avoid Denial-of-service attacks
2275 * XXX: may have to refine this test
2276 * Check against address space limit.
2278 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2281 if (size
> task_rlimit(task
, RLIMIT_MEMLOCK
))
2285 * We do the easy to undo allocations first.
2287 * pfm_rvmalloc(), clears the buffer, so there is no leak
2289 smpl_buf
= pfm_rvmalloc(size
);
2290 if (smpl_buf
== NULL
) {
2291 DPRINT(("Can't allocate sampling buffer\n"));
2295 DPRINT(("smpl_buf @%p\n", smpl_buf
));
2298 vma
= kmem_cache_zalloc(vm_area_cachep
, GFP_KERNEL
);
2300 DPRINT(("Cannot allocate vma\n"));
2303 INIT_LIST_HEAD(&vma
->anon_vma_chain
);
2306 * partially initialize the vma for the sampling buffer
2309 vma
->vm_file
= filp
;
2310 vma
->vm_flags
= VM_READ
| VM_MAYREAD
|VM_RESERVED
;
2311 vma
->vm_page_prot
= PAGE_READONLY
; /* XXX may need to change */
2314 * Now we have everything we need and we can initialize
2315 * and connect all the data structures
2318 ctx
->ctx_smpl_hdr
= smpl_buf
;
2319 ctx
->ctx_smpl_size
= size
; /* aligned size */
2322 * Let's do the difficult operations next.
2324 * now we atomically find some area in the address space and
2325 * remap the buffer in it.
2327 down_write(&task
->mm
->mmap_sem
);
2329 /* find some free area in address space, must have mmap sem held */
2330 vma
->vm_start
= get_unmapped_area(NULL
, 0, size
, 0, MAP_PRIVATE
|MAP_ANONYMOUS
);
2331 if (IS_ERR_VALUE(vma
->vm_start
)) {
2332 DPRINT(("Cannot find unmapped area for size %ld\n", size
));
2333 up_write(&task
->mm
->mmap_sem
);
2336 vma
->vm_end
= vma
->vm_start
+ size
;
2337 vma
->vm_pgoff
= vma
->vm_start
>> PAGE_SHIFT
;
2339 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size
, ctx
->ctx_smpl_hdr
, vma
->vm_start
));
2341 /* can only be applied to current task, need to have the mm semaphore held when called */
2342 if (pfm_remap_buffer(vma
, (unsigned long)smpl_buf
, vma
->vm_start
, size
)) {
2343 DPRINT(("Can't remap buffer\n"));
2344 up_write(&task
->mm
->mmap_sem
);
2351 * now insert the vma in the vm list for the process, must be
2352 * done with mmap lock held
2354 insert_vm_struct(mm
, vma
);
2356 mm
->total_vm
+= size
>> PAGE_SHIFT
;
2357 vm_stat_account(vma
->vm_mm
, vma
->vm_flags
, vma
->vm_file
,
2359 up_write(&task
->mm
->mmap_sem
);
2362 * keep track of user level virtual address
2364 ctx
->ctx_smpl_vaddr
= (void *)vma
->vm_start
;
2365 *(unsigned long *)user_vaddr
= vma
->vm_start
;
2370 kmem_cache_free(vm_area_cachep
, vma
);
2372 pfm_rvfree(smpl_buf
, size
);
/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	const struct cred *tcred;
	uid_t uid = current_uid();
	gid_t gid = current_gid();
	int ret;

	rcu_read_lock();
	tcred = __task_cred(task);

	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		uid,
		gid,
		tcred->euid,
		tcred->suid,
		tcred->uid,
		tcred->egid,
		tcred->sgid));

	ret = ((uid != tcred->euid)
	       || (uid != tcred->suid)
	       || (uid != tcred->uid)
	       || (gid != tcred->egid)
	       || (gid != tcred->sgid)
	       || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);

	rcu_read_unlock();
	return ret;
}
static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}
2438 pfm_setup_buffer_fmt(struct task_struct
*task
, struct file
*filp
, pfm_context_t
*ctx
, unsigned int ctx_flags
,
2439 unsigned int cpu
, pfarg_context_t
*arg
)
2441 pfm_buffer_fmt_t
*fmt
= NULL
;
2442 unsigned long size
= 0UL;
2444 void *fmt_arg
= NULL
;
2446 #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2448 /* invoke and lock buffer format, if found */
2449 fmt
= pfm_find_buffer_fmt(arg
->ctx_smpl_buf_id
);
2451 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task
)));
2456 * buffer argument MUST be contiguous to pfarg_context_t
2458 if (fmt
->fmt_arg_size
) fmt_arg
= PFM_CTXARG_BUF_ARG(arg
);
2460 ret
= pfm_buf_fmt_validate(fmt
, task
, ctx_flags
, cpu
, fmt_arg
);
2462 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task
), ctx_flags
, cpu
, fmt_arg
, ret
));
2464 if (ret
) goto error
;
2466 /* link buffer format and context */
2467 ctx
->ctx_buf_fmt
= fmt
;
2468 ctx
->ctx_fl_is_sampling
= 1; /* assume record() is defined */
2471 * check if buffer format wants to use perfmon buffer allocation/mapping service
2473 ret
= pfm_buf_fmt_getsize(fmt
, task
, ctx_flags
, cpu
, fmt_arg
, &size
);
2474 if (ret
) goto error
;
2478 * buffer is always remapped into the caller's address space
2480 ret
= pfm_smpl_buffer_alloc(current
, filp
, ctx
, size
, &uaddr
);
2481 if (ret
) goto error
;
2483 /* keep track of user address of buffer */
2484 arg
->ctx_smpl_vaddr
= uaddr
;
2486 ret
= pfm_buf_fmt_init(fmt
, task
, ctx
->ctx_smpl_hdr
, ctx_flags
, cpu
, fmt_arg
);
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switch restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitively slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}
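/*
 * Worked example with illustrative values: on a PMU where impl_pmcs[0] is
 * 0xff (PMC0-PMC7 implemented), the assignment above gives
 *
 *	ctx_all_pmcs[0] = 0xff & ~0x1 = 0xfe
 *
 * i.e. PMC1-PMC7 are exposed to the context while PMC0, which the PMU uses
 * to report overflow status, is kept under kernel control.
 */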
static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
2573 * cannot attach if :
2575 * - task not owned by caller
2576 * - task incompatible with context mode
2579 pfm_task_incompatible(pfm_context_t
*ctx
, struct task_struct
*task
)
2582 * no kernel task or task not owner by caller
2584 if (task
->mm
== NULL
) {
2585 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task
)));
2588 if (pfm_bad_permissions(task
)) {
2589 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task
)));
2593 * cannot block in self-monitoring mode
2595 if (CTX_OVFL_NOBLOCK(ctx
) == 0 && task
== current
) {
2596 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task
)));
2600 if (task
->exit_state
== EXIT_ZOMBIE
) {
2601 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task
)));
2606 * always ok for self
2608 if (task
== current
) return 0;
2610 if (!task_is_stopped_or_traced(task
)) {
2611 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task
), task
->state
));
2615 * make sure the task is off any CPU
2617 wait_task_inactive(task
, 0);
2619 /* more to come... */
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != task_pid_vnr(current)) {

		read_lock(&tasklist_lock);

		p = find_task_by_vpid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
2659 pfm_context_create(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
2661 pfarg_context_t
*req
= (pfarg_context_t
*)arg
;
2668 /* let's check the arguments first */
2669 ret
= pfarg_is_sane(current
, req
);
2673 ctx_flags
= req
->ctx_flags
;
2677 fd
= get_unused_fd();
2681 ctx
= pfm_context_alloc(ctx_flags
);
2685 filp
= pfm_alloc_file(ctx
);
2687 ret
= PTR_ERR(filp
);
2691 req
->ctx_fd
= ctx
->ctx_fd
= fd
;
2694 * does the user want to sample?
2696 if (pfm_uuid_cmp(req
->ctx_smpl_buf_id
, pfm_null_uuid
)) {
2697 ret
= pfm_setup_buffer_fmt(current
, filp
, ctx
, ctx_flags
, 0, req
);
2702 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2707 ctx
->ctx_fl_excl_idle
,
2712 * initialize soft PMU state
2714 pfm_reset_pmu_state(ctx
);
2716 fd_install(fd
, filp
);
2721 path
= filp
->f_path
;
2725 if (ctx
->ctx_buf_fmt
) {
2726 pfm_buf_fmt_exit(ctx
->ctx_buf_fmt
, current
, NULL
, regs
);
2729 pfm_context_free(ctx
);
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}
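/*
 * Worked example with illustrative numbers: counters count upward and
 * interrupt on overflow, so a reset value is a negative offset. With
 * reg->long_reset = -1000 (one sample every 1000 events), PFM_REGFL_RANDOM
 * set, reg->seed = 0x1234 and reg->mask = 0xff, one long reset computes
 *
 *	val = -1000 - (0x1234 & 0xff) = -1000 - 52 = -1052
 *
 * so the next period is randomly stretched by up to mask events, which
 * helps decorrelate sampling from strictly periodic program behavior.
 */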
2756 pfm_reset_regs_masked(pfm_context_t
*ctx
, unsigned long *ovfl_regs
, int is_long_reset
)
2758 unsigned long mask
= ovfl_regs
[0];
2759 unsigned long reset_others
= 0UL;
2764 * now restore reset value on sampling overflowed counters
2766 mask
>>= PMU_FIRST_COUNTER
;
2767 for(i
= PMU_FIRST_COUNTER
; mask
; i
++, mask
>>= 1) {
2769 if ((mask
& 0x1UL
) == 0UL) continue;
2771 ctx
->ctx_pmds
[i
].val
= val
= pfm_new_counter_value(ctx
->ctx_pmds
+ i
, is_long_reset
);
2772 reset_others
|= ctx
->ctx_pmds
[i
].reset_pmds
[0];
2774 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset
? "long" : "short", i
, val
));
2778 * Now take care of resetting the other registers
2780 for(i
= 0; reset_others
; i
++, reset_others
>>= 1) {
2782 if ((reset_others
& 0x1) == 0) continue;
2784 ctx
->ctx_pmds
[i
].val
= val
= pfm_new_counter_value(ctx
->ctx_pmds
+ i
, is_long_reset
);
2786 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2787 is_long_reset
? "long" : "short", i
, val
));
2792 pfm_reset_regs(pfm_context_t
*ctx
, unsigned long *ovfl_regs
, int is_long_reset
)
2794 unsigned long mask
= ovfl_regs
[0];
2795 unsigned long reset_others
= 0UL;
2799 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs
[0], is_long_reset
));
2801 if (ctx
->ctx_state
== PFM_CTX_MASKED
) {
2802 pfm_reset_regs_masked(ctx
, ovfl_regs
, is_long_reset
);
2807 * now restore reset value on sampling overflowed counters
2809 mask
>>= PMU_FIRST_COUNTER
;
2810 for(i
= PMU_FIRST_COUNTER
; mask
; i
++, mask
>>= 1) {
2812 if ((mask
& 0x1UL
) == 0UL) continue;
2814 val
= pfm_new_counter_value(ctx
->ctx_pmds
+ i
, is_long_reset
);
2815 reset_others
|= ctx
->ctx_pmds
[i
].reset_pmds
[0];
2817 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset
? "long" : "short", i
, val
));
2819 pfm_write_soft_counter(ctx
, i
, val
);
2823 * Now take care of resetting the other registers
2825 for(i
= 0; reset_others
; i
++, reset_others
>>= 1) {
2827 if ((reset_others
& 0x1) == 0) continue;
2829 val
= pfm_new_counter_value(ctx
->ctx_pmds
+ i
, is_long_reset
);
2831 if (PMD_IS_COUNTING(i
)) {
2832 pfm_write_soft_counter(ctx
, i
, val
);
2834 ia64_set_pmd(i
, val
);
2836 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2837 is_long_reset
? "long" : "short", i
, val
));
2843 pfm_write_pmcs(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
2845 struct task_struct
*task
;
2846 pfarg_reg_t
*req
= (pfarg_reg_t
*)arg
;
2847 unsigned long value
, pmc_pm
;
2848 unsigned long smpl_pmds
, reset_pmds
, impl_pmds
;
2849 unsigned int cnum
, reg_flags
, flags
, pmc_type
;
2850 int i
, can_access_pmu
= 0, is_loaded
, is_system
, expert_mode
;
2851 int is_monitor
, is_counting
, state
;
2853 pfm_reg_check_t wr_func
;
2854 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2856 state
= ctx
->ctx_state
;
2857 is_loaded
= state
== PFM_CTX_LOADED
? 1 : 0;
2858 is_system
= ctx
->ctx_fl_system
;
2859 task
= ctx
->ctx_task
;
2860 impl_pmds
= pmu_conf
->impl_pmds
[0];
2862 if (state
== PFM_CTX_ZOMBIE
) return -EINVAL
;
2866 * In system wide and when the context is loaded, access can only happen
2867 * when the caller is running on the CPU being monitored by the session.
2868 * It does not have to be the owner (ctx_task) of the context per se.
2870 if (is_system
&& ctx
->ctx_cpu
!= smp_processor_id()) {
2871 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
2874 can_access_pmu
= GET_PMU_OWNER() == task
|| is_system
? 1 : 0;
2876 expert_mode
= pfm_sysctl
.expert_mode
;
2878 for (i
= 0; i
< count
; i
++, req
++) {
2880 cnum
= req
->reg_num
;
2881 reg_flags
= req
->reg_flags
;
2882 value
= req
->reg_value
;
2883 smpl_pmds
= req
->reg_smpl_pmds
[0];
2884 reset_pmds
= req
->reg_reset_pmds
[0];
2888 if (cnum
>= PMU_MAX_PMCS
) {
2889 DPRINT(("pmc%u is invalid\n", cnum
));
2893 pmc_type
= pmu_conf
->pmc_desc
[cnum
].type
;
2894 pmc_pm
= (value
>> pmu_conf
->pmc_desc
[cnum
].pm_pos
) & 0x1;
2895 is_counting
= (pmc_type
& PFM_REG_COUNTING
) == PFM_REG_COUNTING
? 1 : 0;
2896 is_monitor
= (pmc_type
& PFM_REG_MONITOR
) == PFM_REG_MONITOR
? 1 : 0;
2899 * we reject all non implemented PMC as well
2900 * as attempts to modify PMC[0-3] which are used
2901 * as status registers by the PMU
2903 if ((pmc_type
& PFM_REG_IMPL
) == 0 || (pmc_type
& PFM_REG_CONTROL
) == PFM_REG_CONTROL
) {
2904 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum
, pmc_type
));
2907 wr_func
= pmu_conf
->pmc_desc
[cnum
].write_check
;
2909 * If the PMC is a monitor, then if the value is not the default:
2910 * - system-wide session: PMCx.pm=1 (privileged monitor)
2911 * - per-task : PMCx.pm=0 (user monitor)
2913 if (is_monitor
&& value
!= PMC_DFL_VAL(cnum
) && is_system
^ pmc_pm
) {
2914 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2923 * enforce generation of overflow interrupt. Necessary on all
2926 value
|= 1 << PMU_PMC_OI
;
2928 if (reg_flags
& PFM_REGFL_OVFL_NOTIFY
) {
2929 flags
|= PFM_REGFL_OVFL_NOTIFY
;
2932 if (reg_flags
& PFM_REGFL_RANDOM
) flags
|= PFM_REGFL_RANDOM
;
2934 /* verify validity of smpl_pmds */
2935 if ((smpl_pmds
& impl_pmds
) != smpl_pmds
) {
2936 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds
, cnum
));
2940 /* verify validity of reset_pmds */
2941 if ((reset_pmds
& impl_pmds
) != reset_pmds
) {
2942 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds
, cnum
));
2946 if (reg_flags
& (PFM_REGFL_OVFL_NOTIFY
|PFM_REGFL_RANDOM
)) {
2947 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum
));
2950 /* eventid on non-counting monitors are ignored */
2954 * execute write checker, if any
2956 if (likely(expert_mode
== 0 && wr_func
)) {
2957 ret
= (*wr_func
)(task
, ctx
, cnum
, &value
, regs
);
2958 if (ret
) goto error
;
2963 * no error on this register
2965 PFM_REG_RETFLAG_SET(req
->reg_flags
, 0);
2968 * Now we commit the changes to the software state
2972 * update overflow information
2976 * full flag update each time a register is programmed
2978 ctx
->ctx_pmds
[cnum
].flags
= flags
;
2980 ctx
->ctx_pmds
[cnum
].reset_pmds
[0] = reset_pmds
;
2981 ctx
->ctx_pmds
[cnum
].smpl_pmds
[0] = smpl_pmds
;
2982 ctx
->ctx_pmds
[cnum
].eventid
= req
->reg_smpl_eventid
;
2985 * Mark all PMDS to be accessed as used.
2987 * We do not keep track of PMC because we have to
2988 * systematically restore ALL of them.
2990 * We do not update the used_monitors mask, because
2991 * if we have not programmed them, then will be in
2992 * a quiescent state, therefore we will not need to
2993 * mask/restore then when context is MASKED.
2995 CTX_USED_PMD(ctx
, reset_pmds
);
2996 CTX_USED_PMD(ctx
, smpl_pmds
);
2998 * make sure we do not try to reset on
2999 * restart because we have established new values
3001 if (state
== PFM_CTX_MASKED
) ctx
->ctx_ovfl_regs
[0] &= ~1UL << cnum
;
3004 * Needed in case the user does not initialize the equivalent
3005 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3006 * possible leak here.
3008 CTX_USED_PMD(ctx
, pmu_conf
->pmc_desc
[cnum
].dep_pmd
[0]);
3011 * keep track of the monitor PMC that we are using.
3012 * we save the value of the pmc in ctx_pmcs[] and if
3013 * the monitoring is not stopped for the context we also
3014 * place it in the saved state area so that it will be
3015 * picked up later by the context switch code.
3017 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3019 * The value in th_pmcs[] may be modified on overflow, i.e., when
3020 * monitoring needs to be stopped.
3022 if (is_monitor
) CTX_USED_MONITOR(ctx
, 1UL << cnum
);
3025 * update context state
3027 ctx
->ctx_pmcs
[cnum
] = value
;
3031 * write thread state
3033 if (is_system
== 0) ctx
->th_pmcs
[cnum
] = value
;
3036 * write hardware register if we can
3038 if (can_access_pmu
) {
3039 ia64_set_pmc(cnum
, value
);
3044 * per-task SMP only here
3046 * we are guaranteed that the task is not running on the other CPU,
3047 * we indicate that this PMD will need to be reloaded if the task
3048 * is rescheduled on the CPU it ran last on.
3050 ctx
->ctx_reload_pmcs
[0] |= 1UL << cnum
;
3055 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3061 ctx
->ctx_all_pmcs
[0],
3062 ctx
->ctx_used_pmds
[0],
3063 ctx
->ctx_pmds
[cnum
].eventid
,
3066 ctx
->ctx_reload_pmcs
[0],
3067 ctx
->ctx_used_monitors
[0],
3068 ctx
->ctx_ovfl_regs
[0]));
3072 * make sure the changes are visible
3074 if (can_access_pmu
) ia64_srlz_d();
3078 PFM_REG_RETFLAG_SET(req
->reg_flags
, PFM_REG_RETFL_EINVAL
);
3083 pfm_write_pmds(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3085 struct task_struct
*task
;
3086 pfarg_reg_t
*req
= (pfarg_reg_t
*)arg
;
3087 unsigned long value
, hw_value
, ovfl_mask
;
3089 int i
, can_access_pmu
= 0, state
;
3090 int is_counting
, is_loaded
, is_system
, expert_mode
;
3092 pfm_reg_check_t wr_func
;
3095 state
= ctx
->ctx_state
;
3096 is_loaded
= state
== PFM_CTX_LOADED
? 1 : 0;
3097 is_system
= ctx
->ctx_fl_system
;
3098 ovfl_mask
= pmu_conf
->ovfl_val
;
3099 task
= ctx
->ctx_task
;
3101 if (unlikely(state
== PFM_CTX_ZOMBIE
)) return -EINVAL
;
3104 * on both UP and SMP, we can only write to the PMC when the task is
3105 * the owner of the local PMU.
3107 if (likely(is_loaded
)) {
3109 * In system wide and when the context is loaded, access can only happen
3110 * when the caller is running on the CPU being monitored by the session.
3111 * It does not have to be the owner (ctx_task) of the context per se.
3113 if (unlikely(is_system
&& ctx
->ctx_cpu
!= smp_processor_id())) {
3114 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
3117 can_access_pmu
= GET_PMU_OWNER() == task
|| is_system
? 1 : 0;
3119 expert_mode
= pfm_sysctl
.expert_mode
;
3121 for (i
= 0; i
< count
; i
++, req
++) {
3123 cnum
= req
->reg_num
;
3124 value
= req
->reg_value
;
3126 if (!PMD_IS_IMPL(cnum
)) {
3127 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum
));
3130 is_counting
= PMD_IS_COUNTING(cnum
);
3131 wr_func
= pmu_conf
->pmd_desc
[cnum
].write_check
;
3134 * execute write checker, if any
3136 if (unlikely(expert_mode
== 0 && wr_func
)) {
3137 unsigned long v
= value
;
3139 ret
= (*wr_func
)(task
, ctx
, cnum
, &v
, regs
);
3140 if (ret
) goto abort_mission
;
3147 * no error on this register
3149 PFM_REG_RETFLAG_SET(req
->reg_flags
, 0);
3152 * now commit changes to software state
3157 * update virtualized (64bits) counter
3161 * write context state
3163 ctx
->ctx_pmds
[cnum
].lval
= value
;
3166 * when context is load we use the split value
3169 hw_value
= value
& ovfl_mask
;
3170 value
= value
& ~ovfl_mask
;
3174 * update reset values (not just for counters)
3176 ctx
->ctx_pmds
[cnum
].long_reset
= req
->reg_long_reset
;
3177 ctx
->ctx_pmds
[cnum
].short_reset
= req
->reg_short_reset
;
3180 * update randomization parameters (not just for counters)
3182 ctx
->ctx_pmds
[cnum
].seed
= req
->reg_random_seed
;
3183 ctx
->ctx_pmds
[cnum
].mask
= req
->reg_random_mask
;
3186 * update context value
3188 ctx
->ctx_pmds
[cnum
].val
= value
;
3191 * Keep track of what we use
3193 * We do not keep track of PMC because we have to
3194 * systematically restore ALL of them.
3196 CTX_USED_PMD(ctx
, PMD_PMD_DEP(cnum
));
3199 * mark this PMD register used as well
3201 CTX_USED_PMD(ctx
, RDEP(cnum
));
3204 * make sure we do not try to reset on
3205 * restart because we have established new values
3207 if (is_counting
&& state
== PFM_CTX_MASKED
) {
3208 ctx
->ctx_ovfl_regs
[0] &= ~1UL << cnum
;
3213 * write thread state
3215 if (is_system
== 0) ctx
->th_pmds
[cnum
] = hw_value
;
3218 * write hardware register if we can
3220 if (can_access_pmu
) {
3221 ia64_set_pmd(cnum
, hw_value
);
3225 * we are guaranteed that the task is not running on the other CPU,
3226 * we indicate that this PMD will need to be reloaded if the task
3227 * is rescheduled on the CPU it ran last on.
3229 ctx
->ctx_reload_pmds
[0] |= 1UL << cnum
;
3234 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3235 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3241 ctx
->ctx_pmds
[cnum
].val
,
3242 ctx
->ctx_pmds
[cnum
].short_reset
,
3243 ctx
->ctx_pmds
[cnum
].long_reset
,
3244 PMC_OVFL_NOTIFY(ctx
, cnum
) ? 'Y':'N',
3245 ctx
->ctx_pmds
[cnum
].seed
,
3246 ctx
->ctx_pmds
[cnum
].mask
,
3247 ctx
->ctx_used_pmds
[0],
3248 ctx
->ctx_pmds
[cnum
].reset_pmds
[0],
3249 ctx
->ctx_reload_pmds
[0],
3250 ctx
->ctx_all_pmds
[0],
3251 ctx
->ctx_ovfl_regs
[0]));
3255 * make changes visible
3257 if (can_access_pmu
) ia64_srlz_d();
3263 * for now, we have only one possibility for error
3265 PFM_REG_RETFLAG_SET(req
->reg_flags
, PFM_REG_RETFL_EINVAL
);
3270 * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
3271 * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
3272 * interrupt is delivered during the call, it will be kept pending until we leave, making
3273 * it appears as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
3274 * guaranteed to return consistent data to the user, it may simply be old. It is not
3275 * trivial to treat the overflow while inside the call because you may end up in
3276 * some module sampling buffer code causing deadlocks.
3279 pfm_read_pmds(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3281 struct task_struct
*task
;
3282 unsigned long val
= 0UL, lval
, ovfl_mask
, sval
;
3283 pfarg_reg_t
*req
= (pfarg_reg_t
*)arg
;
3284 unsigned int cnum
, reg_flags
= 0;
3285 int i
, can_access_pmu
= 0, state
;
3286 int is_loaded
, is_system
, is_counting
, expert_mode
;
3288 pfm_reg_check_t rd_func
;
3291 * access is possible when loaded only for
3292 * self-monitoring tasks or in UP mode
3295 state
= ctx
->ctx_state
;
3296 is_loaded
= state
== PFM_CTX_LOADED
? 1 : 0;
3297 is_system
= ctx
->ctx_fl_system
;
3298 ovfl_mask
= pmu_conf
->ovfl_val
;
3299 task
= ctx
->ctx_task
;
3301 if (state
== PFM_CTX_ZOMBIE
) return -EINVAL
;
3303 if (likely(is_loaded
)) {
3305 * In system wide and when the context is loaded, access can only happen
3306 * when the caller is running on the CPU being monitored by the session.
3307 * It does not have to be the owner (ctx_task) of the context per se.
3309 if (unlikely(is_system
&& ctx
->ctx_cpu
!= smp_processor_id())) {
3310 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
3314 * this can be true when not self-monitoring only in UP
3316 can_access_pmu
= GET_PMU_OWNER() == task
|| is_system
? 1 : 0;
3318 if (can_access_pmu
) ia64_srlz_d();
3320 expert_mode
= pfm_sysctl
.expert_mode
;
3322 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3328 * on both UP and SMP, we can only read the PMD from the hardware register when
3329 * the task is the owner of the local PMU.
3332 for (i
= 0; i
< count
; i
++, req
++) {
3334 cnum
= req
->reg_num
;
3335 reg_flags
= req
->reg_flags
;
3337 if (unlikely(!PMD_IS_IMPL(cnum
))) goto error
;
3339 * we can only read the register that we use. That includes
3340 * the one we explicitly initialize AND the one we want included
3341 * in the sampling buffer (smpl_regs).
3343 * Having this restriction allows optimization in the ctxsw routine
3344 * without compromising security (leaks)
3346 if (unlikely(!CTX_IS_USED_PMD(ctx
, cnum
))) goto error
;
3348 sval
= ctx
->ctx_pmds
[cnum
].val
;
3349 lval
= ctx
->ctx_pmds
[cnum
].lval
;
3350 is_counting
= PMD_IS_COUNTING(cnum
);
3353 * If the task is not the current one, then we check if the
3354 * PMU state is still in the local live register due to lazy ctxsw.
3355 * If true, then we read directly from the registers.
3357 if (can_access_pmu
){
3358 val
= ia64_get_pmd(cnum
);
3361 * context has been saved
3362 * if context is zombie, then task does not exist anymore.
3363 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3365 val
= is_loaded
? ctx
->th_pmds
[cnum
] : 0UL;
3367 rd_func
= pmu_conf
->pmd_desc
[cnum
].read_check
;
3371 * XXX: need to check for overflow when loaded
3378 * execute read checker, if any
3380 if (unlikely(expert_mode
== 0 && rd_func
)) {
3381 unsigned long v
= val
;
3382 ret
= (*rd_func
)(ctx
->ctx_task
, ctx
, cnum
, &v
, regs
);
3383 if (ret
) goto error
;
3388 PFM_REG_RETFLAG_SET(reg_flags
, 0);
3390 DPRINT(("pmd[%u]=0x%lx\n", cnum
, val
));
3393 * update register return value, abort all if problem during copy.
3394 * we only modify the reg_flags field. no check mode is fine because
3395 * access has been verified upfront in sys_perfmonctl().
3397 req
->reg_value
= val
;
3398 req
->reg_flags
= reg_flags
;
3399 req
->reg_last_reset_val
= lval
;
3405 PFM_REG_RETFLAG_SET(req
->reg_flags
, PFM_REG_RETFL_EINVAL
);
int
pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_write_pmcs(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_write_pmcs);

int
pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
{
	pfm_context_t *ctx;

	if (req == NULL) return -EINVAL;

	ctx = GET_PMU_CTX();

	if (ctx == NULL) return -EINVAL;

	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
	if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;

	return pfm_read_pmds(ctx, req, nreq, regs);
}
EXPORT_SYMBOL(pfm_mod_read_pmds);
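/*
 * These exported wrappers are meant for in-kernel callers, typically a
 * sampling buffer format module running from the overflow handler, which
 * reach the context through GET_PMU_CTX(). A hypothetical module could,
 * for instance, read one PMD of the current task with:
 *
 *	pfarg_reg_t rd = { .reg_num = 4 };	(illustrative register)
 *	ret = pfm_mod_read_pmds(current, &rd, 1, regs);
 *
 * As noted above, anything but the current task is refused unless the
 * context is system wide.
 */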
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	unsigned long flags;
	int ret = 0;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	DPRINT(("called for [%d]\n", task_pid_nr(task)));

	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS(flags);

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		pfm_sessions.pfs_ptrace_use_dbregs,
		pfm_sessions.pfs_sys_use_dbregs,
		task_pid_nr(task), ret));

	UNLOCK_PFS(flags);

	return ret;
}
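/*
 * In other words, ptrace() debuggers and perfmon users of the range
 * restriction feature are mutually exclusive on the debug registers:
 * pfs_sys_use_dbregs counts monitoring sessions that programmed them,
 * while pfs_ptrace_use_dbregs counts debuggers. A non-zero count on one
 * side makes the other side fail, and pfm_release_debug_registers() below
 * undoes the ptrace-side accounting when such a task exits.
 */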
/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date.
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	unsigned long flags;
	int ret;

	if (pmu_conf->use_rr_dbregs == 0) return 0;

	LOCK_PFS(flags);
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS(flags);

	return ret;
}
3532 pfm_restart(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3534 struct task_struct
*task
;
3535 pfm_buffer_fmt_t
*fmt
;
3536 pfm_ovfl_ctrl_t rst_ctrl
;
3537 int state
, is_system
;
3540 state
= ctx
->ctx_state
;
3541 fmt
= ctx
->ctx_buf_fmt
;
3542 is_system
= ctx
->ctx_fl_system
;
3543 task
= PFM_CTX_TASK(ctx
);
3546 case PFM_CTX_MASKED
:
3548 case PFM_CTX_LOADED
:
3549 if (CTX_HAS_SMPL(ctx
) && fmt
->fmt_restart_active
) break;
3551 case PFM_CTX_UNLOADED
:
3552 case PFM_CTX_ZOMBIE
:
3553 DPRINT(("invalid state=%d\n", state
));
3556 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state
));
3561 * In system wide and when the context is loaded, access can only happen
3562 * when the caller is running on the CPU being monitored by the session.
3563 * It does not have to be the owner (ctx_task) of the context per se.
3565 if (is_system
&& ctx
->ctx_cpu
!= smp_processor_id()) {
3566 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
3571 if (unlikely(task
== NULL
)) {
3572 printk(KERN_ERR
"perfmon: [%d] pfm_restart no task\n", task_pid_nr(current
));
3576 if (task
== current
|| is_system
) {
3578 fmt
= ctx
->ctx_buf_fmt
;
3580 DPRINT(("restarting self %d ovfl=0x%lx\n",
3582 ctx
->ctx_ovfl_regs
[0]));
3584 if (CTX_HAS_SMPL(ctx
)) {
3586 prefetch(ctx
->ctx_smpl_hdr
);
3588 rst_ctrl
.bits
.mask_monitoring
= 0;
3589 rst_ctrl
.bits
.reset_ovfl_pmds
= 0;
3591 if (state
== PFM_CTX_LOADED
)
3592 ret
= pfm_buf_fmt_restart_active(fmt
, task
, &rst_ctrl
, ctx
->ctx_smpl_hdr
, regs
);
3594 ret
= pfm_buf_fmt_restart(fmt
, task
, &rst_ctrl
, ctx
->ctx_smpl_hdr
, regs
);
3596 rst_ctrl
.bits
.mask_monitoring
= 0;
3597 rst_ctrl
.bits
.reset_ovfl_pmds
= 1;
3601 if (rst_ctrl
.bits
.reset_ovfl_pmds
)
3602 pfm_reset_regs(ctx
, ctx
->ctx_ovfl_regs
, PFM_PMD_LONG_RESET
);
3604 if (rst_ctrl
.bits
.mask_monitoring
== 0) {
3605 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task
)));
3607 if (state
== PFM_CTX_MASKED
) pfm_restore_monitoring(task
);
3609 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task
)));
3611 // cannot use pfm_stop_monitoring(task, regs);
3615 * clear overflowed PMD mask to remove any stale information
3617 ctx
->ctx_ovfl_regs
[0] = 0UL;
3620 * back to LOADED state
3622 ctx
->ctx_state
= PFM_CTX_LOADED
;
3625 * XXX: not really useful for self monitoring
3627 ctx
->ctx_fl_can_restart
= 0;
3633 * restart another task
3637 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3638 * one is seen by the task.
3640 if (state
== PFM_CTX_MASKED
) {
3641 if (ctx
->ctx_fl_can_restart
== 0) return -EINVAL
;
3643 * will prevent subsequent restart before this one is
3644 * seen by other task
3646 ctx
->ctx_fl_can_restart
= 0;
3650 * if blocking, then post the semaphore is PFM_CTX_MASKED, i.e.
3651 * the task is blocked or on its way to block. That's the normal
3652 * restart path. If the monitoring is not masked, then the task
3653 * can be actively monitoring and we cannot directly intervene.
3654 * Therefore we use the trap mechanism to catch the task and
3655 * force it to reset the buffer/reset PMDs.
3657 * if non-blocking, then we ensure that the task will go into
3658 * pfm_handle_work() before returning to user mode.
3660 * We cannot explicitly reset another task, it MUST always
3661 * be done by the task itself. This works for system wide because
3662 * the tool that is controlling the session is logically doing
3663 * "self-monitoring".
3665 if (CTX_OVFL_NOBLOCK(ctx
) == 0 && state
== PFM_CTX_MASKED
) {
3666 DPRINT(("unblocking [%d]\n", task_pid_nr(task
)));
3667 complete(&ctx
->ctx_restart_done
);
3669 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task
)));
3671 ctx
->ctx_fl_trap_reason
= PFM_TRAP_REASON_RESET
;
3673 PFM_SET_WORK_PENDING(task
, 1);
3675 set_notify_resume(task
);
3678 * XXX: send reschedule if task runs on another CPU
static int
pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	unsigned int m = *(unsigned int *)arg;

	pfm_sysctl.debug = m == 0 ? 0 : 1;

	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

	if (m == 0) {
		memset(pfm_stats, 0, sizeof(pfm_stats));
		for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
	}
	return 0;
}
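/*
 * Usage note: PFM_DEBUG is normally issued through perfmonctl(), but the
 * same pfm_sysctl.debug flag is also reachable via the perfmon sysctl
 * entries (typically /proc/sys/kernel/perfmon/debug), so the messages can
 * be toggled without a dedicated tool. Passing m == 0 turns the output off
 * and also clears the pfm_stats timing counters.
 */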
3701 * arg can be NULL and count can be zero for this function
3704 pfm_write_ibr_dbr(int mode
, pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3706 struct thread_struct
*thread
= NULL
;
3707 struct task_struct
*task
;
3708 pfarg_dbreg_t
*req
= (pfarg_dbreg_t
*)arg
;
3709 unsigned long flags
;
3714 int i
, can_access_pmu
= 0;
3715 int is_system
, is_loaded
;
3717 if (pmu_conf
->use_rr_dbregs
== 0) return -EINVAL
;
3719 state
= ctx
->ctx_state
;
3720 is_loaded
= state
== PFM_CTX_LOADED
? 1 : 0;
3721 is_system
= ctx
->ctx_fl_system
;
3722 task
= ctx
->ctx_task
;
3724 if (state
== PFM_CTX_ZOMBIE
) return -EINVAL
;
3727 * on both UP and SMP, we can only write to the PMC when the task is
3728 * the owner of the local PMU.
3731 thread
= &task
->thread
;
3733 * In system wide and when the context is loaded, access can only happen
3734 * when the caller is running on the CPU being monitored by the session.
3735 * It does not have to be the owner (ctx_task) of the context per se.
3737 if (unlikely(is_system
&& ctx
->ctx_cpu
!= smp_processor_id())) {
3738 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
3741 can_access_pmu
= GET_PMU_OWNER() == task
|| is_system
? 1 : 0;
3745 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3746 * ensuring that no real breakpoint can be installed via this call.
3748 * IMPORTANT: regs can be NULL in this function
3751 first_time
= ctx
->ctx_fl_using_dbreg
== 0;
3754 * don't bother if we are loaded and task is being debugged
3756 if (is_loaded
&& (thread
->flags
& IA64_THREAD_DBG_VALID
) != 0) {
3757 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task
)));
3762 * check for debug registers in system wide mode
3764 * If though a check is done in pfm_context_load(),
3765 * we must repeat it here, in case the registers are
3766 * written after the context is loaded
3771 if (first_time
&& is_system
) {
3772 if (pfm_sessions
.pfs_ptrace_use_dbregs
)
3775 pfm_sessions
.pfs_sys_use_dbregs
++;
3780 if (ret
!= 0) return ret
;
3783 * mark ourself as user of the debug registers for
3786 ctx
->ctx_fl_using_dbreg
= 1;
3789 * clear hardware registers to make sure we don't
3790 * pick up stale state.
3792 * for a system wide session, we do not use
3793 * thread.dbr, thread.ibr because this process
3794 * never leaves the current CPU and the state
3795 * is shared by all processes running on it
3797 if (first_time
&& can_access_pmu
) {
3798 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task
)));
3799 for (i
=0; i
< pmu_conf
->num_ibrs
; i
++) {
3800 ia64_set_ibr(i
, 0UL);
3801 ia64_dv_serialize_instruction();
3804 for (i
=0; i
< pmu_conf
->num_dbrs
; i
++) {
3805 ia64_set_dbr(i
, 0UL);
3806 ia64_dv_serialize_data();
3812 * Now install the values into the registers
3814 for (i
= 0; i
< count
; i
++, req
++) {
3816 rnum
= req
->dbreg_num
;
3817 dbreg
.val
= req
->dbreg_value
;
3821 if ((mode
== PFM_CODE_RR
&& rnum
>= PFM_NUM_IBRS
) || ((mode
== PFM_DATA_RR
) && rnum
>= PFM_NUM_DBRS
)) {
3822 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3823 rnum
, dbreg
.val
, mode
, i
, count
));
3829 * make sure we do not install enabled breakpoint
3832 if (mode
== PFM_CODE_RR
)
3833 dbreg
.ibr
.ibr_x
= 0;
3835 dbreg
.dbr
.dbr_r
= dbreg
.dbr
.dbr_w
= 0;
3838 PFM_REG_RETFLAG_SET(req
->dbreg_flags
, 0);
3841 * Debug registers, just like PMC, can only be modified
3842 * by a kernel call. Moreover, perfmon() access to those
3843 * registers are centralized in this routine. The hardware
3844 * does not modify the value of these registers, therefore,
3845 * if we save them as they are written, we can avoid having
3846 * to save them on context switch out. This is made possible
3847 * by the fact that when perfmon uses debug registers, ptrace()
3848 * won't be able to modify them concurrently.
3850 if (mode
== PFM_CODE_RR
) {
3851 CTX_USED_IBR(ctx
, rnum
);
3853 if (can_access_pmu
) {
3854 ia64_set_ibr(rnum
, dbreg
.val
);
3855 ia64_dv_serialize_instruction();
3858 ctx
->ctx_ibrs
[rnum
] = dbreg
.val
;
3860 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3861 rnum
, dbreg
.val
, ctx
->ctx_used_ibrs
[0], is_loaded
, can_access_pmu
));
3863 CTX_USED_DBR(ctx
, rnum
);
3865 if (can_access_pmu
) {
3866 ia64_set_dbr(rnum
, dbreg
.val
);
3867 ia64_dv_serialize_data();
3869 ctx
->ctx_dbrs
[rnum
] = dbreg
.val
;
3871 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3872 rnum
, dbreg
.val
, ctx
->ctx_used_dbrs
[0], is_loaded
, can_access_pmu
));
3880 * in case it was our first attempt, we undo the global modifications
3884 if (ctx
->ctx_fl_system
) {
3885 pfm_sessions
.pfs_sys_use_dbregs
--;
3888 ctx
->ctx_fl_using_dbreg
= 0;
3891 * install error return flag
3893 PFM_REG_RETFLAG_SET(req
->dbreg_flags
, PFM_REG_RETFL_EINVAL
);
3899 pfm_write_ibrs(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3901 return pfm_write_ibr_dbr(PFM_CODE_RR
, ctx
, arg
, count
, regs
);
3905 pfm_write_dbrs(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3907 return pfm_write_ibr_dbr(PFM_DATA_RR
, ctx
, arg
, count
, regs
);
3911 pfm_mod_write_ibrs(struct task_struct
*task
, void *req
, unsigned int nreq
, struct pt_regs
*regs
)
3915 if (req
== NULL
) return -EINVAL
;
3917 ctx
= GET_PMU_CTX();
3919 if (ctx
== NULL
) return -EINVAL
;
3922 * for now limit to current task, which is enough when calling
3923 * from overflow handler
3925 if (task
!= current
&& ctx
->ctx_fl_system
== 0) return -EBUSY
;
3927 return pfm_write_ibrs(ctx
, req
, nreq
, regs
);
3929 EXPORT_SYMBOL(pfm_mod_write_ibrs
);
3932 pfm_mod_write_dbrs(struct task_struct
*task
, void *req
, unsigned int nreq
, struct pt_regs
*regs
)
3936 if (req
== NULL
) return -EINVAL
;
3938 ctx
= GET_PMU_CTX();
3940 if (ctx
== NULL
) return -EINVAL
;
3943 * for now limit to current task, which is enough when calling
3944 * from overflow handler
3946 if (task
!= current
&& ctx
->ctx_fl_system
== 0) return -EBUSY
;
3948 return pfm_write_dbrs(ctx
, req
, nreq
, regs
);
3950 EXPORT_SYMBOL(pfm_mod_write_dbrs
);
static int
pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_features_t *req = (pfarg_features_t *)arg;

	req->ft_version = PFM_VERSION;
	return 0;
}
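/*
 * Intended use (sketch): a monitoring tool issues PFM_GET_FEATURES once at
 * startup and compares the major part of ft_version with the interface
 * version it was built against (PFM_VERSION comes from asm/perfmon.h)
 * before issuing any other command.
 */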
3963 pfm_stop(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
3965 struct pt_regs
*tregs
;
3966 struct task_struct
*task
= PFM_CTX_TASK(ctx
);
3967 int state
, is_system
;
3969 state
= ctx
->ctx_state
;
3970 is_system
= ctx
->ctx_fl_system
;
3973 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
3975 if (state
== PFM_CTX_UNLOADED
) return -EINVAL
;
3978 * In system wide and when the context is loaded, access can only happen
3979 * when the caller is running on the CPU being monitored by the session.
3980 * It does not have to be the owner (ctx_task) of the context per se.
3982 if (is_system
&& ctx
->ctx_cpu
!= smp_processor_id()) {
3983 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
3986 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3987 task_pid_nr(PFM_CTX_TASK(ctx
)),
3991 * in system mode, we need to update the PMU directly
3992 * and the user level state of the caller, which may not
3993 * necessarily be the creator of the context.
3997 * Update local PMU first
4001 ia64_setreg(_IA64_REG_CR_DCR
, ia64_getreg(_IA64_REG_CR_DCR
) & ~IA64_DCR_PP
);
4005 * update local cpuinfo
4007 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP
);
4010 * stop monitoring, does srlz.i
4015 * stop monitoring in the caller
4017 ia64_psr(regs
)->pp
= 0;
4025 if (task
== current
) {
4026 /* stop monitoring at kernel level */
4030 * stop monitoring at the user level
4032 ia64_psr(regs
)->up
= 0;
4034 tregs
= task_pt_regs(task
);
4037 * stop monitoring at the user level
4039 ia64_psr(tregs
)->up
= 0;
4042 * monitoring disabled in kernel at next reschedule
4044 ctx
->ctx_saved_psr_up
= 0;
4045 DPRINT(("task=[%d]\n", task_pid_nr(task
)));
4052 pfm_start(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
4054 struct pt_regs
*tregs
;
4055 int state
, is_system
;
4057 state
= ctx
->ctx_state
;
4058 is_system
= ctx
->ctx_fl_system
;
4060 if (state
!= PFM_CTX_LOADED
) return -EINVAL
;
4063 * In system wide and when the context is loaded, access can only happen
4064 * when the caller is running on the CPU being monitored by the session.
4065 * It does not have to be the owner (ctx_task) of the context per se.
4067 if (is_system
&& ctx
->ctx_cpu
!= smp_processor_id()) {
4068 DPRINT(("should be running on CPU%d\n", ctx
->ctx_cpu
));
4073 * in system mode, we need to update the PMU directly
4074 * and the user level state of the caller, which may not
4075 * necessarily be the creator of the context.
4080 * set user level psr.pp for the caller
4082 ia64_psr(regs
)->pp
= 1;
4085 * now update the local PMU and cpuinfo
4087 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP
);
4090 * start monitoring at kernel level
4095 ia64_setreg(_IA64_REG_CR_DCR
, ia64_getreg(_IA64_REG_CR_DCR
) | IA64_DCR_PP
);
4105 if (ctx
->ctx_task
== current
) {
4107 /* start monitoring at kernel level */
4111 * activate monitoring at user level
4113 ia64_psr(regs
)->up
= 1;
4116 tregs
= task_pt_regs(ctx
->ctx_task
);
4119 * start monitoring at the kernel level the next
4120 * time the task is scheduled
4122 ctx
->ctx_saved_psr_up
= IA64_PSR_UP
;
4125 * activate monitoring at user level
4127 ia64_psr(tregs
)->up
= 1;
static int
pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;

		if (!PMC_IS_IMPL(cnum)) goto abort_mission;

		req->reg_value = PMC_DFL_VAL(cnum);

		PFM_REG_RETFLAG_SET(req->reg_flags, 0);

		DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
static int
pfm_check_task_exist(pfm_context_t *ctx)
{
	struct task_struct *g, *t;
	int ret = -ESRCH;

	read_lock(&tasklist_lock);

	do_each_thread (g, t) {
		if (t->thread.pfm_context == ctx) {
			ret = 0;
			goto out;
		}
	} while_each_thread (g, t);
out:
	read_unlock(&tasklist_lock);

	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));

	return ret;
}
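/*
 * Rationale: pfm_context_load() drops the task reference it obtained via
 * pfm_get_task() once the attach has succeeded. Because the target may exit
 * at any point afterwards, the load path re-runs this scan and, if no thread
 * points at the context anymore, puts it back into PFM_CTX_UNLOADED (see the
 * tail of pfm_context_load() below).
 */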
4182 pfm_context_load(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
4184 struct task_struct
*task
;
4185 struct thread_struct
*thread
;
4186 struct pfm_context_t
*old
;
4187 unsigned long flags
;
4189 struct task_struct
*owner_task
= NULL
;
4191 pfarg_load_t
*req
= (pfarg_load_t
*)arg
;
4192 unsigned long *pmcs_source
, *pmds_source
;
4195 int state
, is_system
, set_dbregs
= 0;
4197 state
= ctx
->ctx_state
;
4198 is_system
= ctx
->ctx_fl_system
;
4200 * can only load from unloaded or terminated state
4202 if (state
!= PFM_CTX_UNLOADED
) {
4203 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4209 DPRINT(("load_pid [%d] using_dbreg=%d\n", req
->load_pid
, ctx
->ctx_fl_using_dbreg
));
4211 if (CTX_OVFL_NOBLOCK(ctx
) == 0 && req
->load_pid
== current
->pid
) {
4212 DPRINT(("cannot use blocking mode on self\n"));
4216 ret
= pfm_get_task(ctx
, req
->load_pid
, &task
);
4218 DPRINT(("load_pid [%d] get_task=%d\n", req
->load_pid
, ret
));
4225 * system wide is self monitoring only
4227 if (is_system
&& task
!= current
) {
4228 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4233 thread
= &task
->thread
;
4237 * cannot load a context which is using range restrictions,
4238 * into a task that is being debugged.
4240 if (ctx
->ctx_fl_using_dbreg
) {
4241 if (thread
->flags
& IA64_THREAD_DBG_VALID
) {
4243 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req
->load_pid
));
4249 if (pfm_sessions
.pfs_ptrace_use_dbregs
) {
4250 DPRINT(("cannot load [%d] dbregs in use\n",
4251 task_pid_nr(task
)));
4254 pfm_sessions
.pfs_sys_use_dbregs
++;
4255 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task
), pfm_sessions
.pfs_sys_use_dbregs
));
4262 if (ret
) goto error
;
4266 * SMP system-wide monitoring implies self-monitoring.
4268 * The programming model expects the task to
4269 * be pinned on a CPU throughout the session.
4270 * Here we take note of the current CPU at the
4271 * time the context is loaded. No call from
4272 * another CPU will be allowed.
4274 * The pinning via shed_setaffinity()
4275 * must be done by the calling task prior
4278 * systemwide: keep track of CPU this session is supposed to run on
4280 the_cpu
= ctx
->ctx_cpu
= smp_processor_id();
4284 * now reserve the session
4286 ret
= pfm_reserve_session(current
, is_system
, the_cpu
);
4287 if (ret
) goto error
;
4290 * task is necessarily stopped at this point.
4292 * If the previous context was zombie, then it got removed in
4293 * pfm_save_regs(). Therefore we should not see it here.
4294 * If we see a context, then this is an active context
4296 * XXX: needs to be atomic
4298 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4299 thread
->pfm_context
, ctx
));
4302 old
= ia64_cmpxchg(acq
, &thread
->pfm_context
, NULL
, ctx
, sizeof(pfm_context_t
*));
4304 DPRINT(("load_pid [%d] already has a context\n", req
->load_pid
));
4308 pfm_reset_msgq(ctx
);
4310 ctx
->ctx_state
= PFM_CTX_LOADED
;
4313 * link context to task
4315 ctx
->ctx_task
= task
;
4319 * we load as stopped
4321 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE
);
4322 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP
);
4324 if (ctx
->ctx_fl_excl_idle
) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE
);
4326 thread
->flags
|= IA64_THREAD_PM_VALID
;
4330 * propagate into thread-state
4332 pfm_copy_pmds(task
, ctx
);
4333 pfm_copy_pmcs(task
, ctx
);
4335 pmcs_source
= ctx
->th_pmcs
;
4336 pmds_source
= ctx
->th_pmds
;
4339 * always the case for system-wide
4341 if (task
== current
) {
4343 if (is_system
== 0) {
4345 /* allow user level control */
4346 ia64_psr(regs
)->sp
= 0;
4347 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task
)));
4349 SET_LAST_CPU(ctx
, smp_processor_id());
4351 SET_ACTIVATION(ctx
);
4354 * push the other task out, if any
4356 owner_task
= GET_PMU_OWNER();
4357 if (owner_task
) pfm_lazy_save_regs(owner_task
);
4361 * load all PMD from ctx to PMU (as opposed to thread state)
4362 * restore all PMC from ctx to PMU
4364 pfm_restore_pmds(pmds_source
, ctx
->ctx_all_pmds
[0]);
4365 pfm_restore_pmcs(pmcs_source
, ctx
->ctx_all_pmcs
[0]);
4367 ctx
->ctx_reload_pmcs
[0] = 0UL;
4368 ctx
->ctx_reload_pmds
[0] = 0UL;
4371 * guaranteed safe by earlier check against DBG_VALID
4373 if (ctx
->ctx_fl_using_dbreg
) {
4374 pfm_restore_ibrs(ctx
->ctx_ibrs
, pmu_conf
->num_ibrs
);
4375 pfm_restore_dbrs(ctx
->ctx_dbrs
, pmu_conf
->num_dbrs
);
4380 SET_PMU_OWNER(task
, ctx
);
4382 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task
)));
4385 * when not current, task MUST be stopped, so this is safe
4387 regs
= task_pt_regs(task
);
4389 /* force a full reload */
4390 ctx
->ctx_last_activation
= PFM_INVALID_ACTIVATION
;
4391 SET_LAST_CPU(ctx
, -1);
4393 /* initial saved psr (stopped) */
4394 ctx
->ctx_saved_psr_up
= 0UL;
4395 ia64_psr(regs
)->up
= ia64_psr(regs
)->pp
= 0;
4401 if (ret
) pfm_unreserve_session(ctx
, ctx
->ctx_fl_system
, the_cpu
);
4404 * we must undo the dbregs setting (for system-wide)
4406 if (ret
&& set_dbregs
) {
4408 pfm_sessions
.pfs_sys_use_dbregs
--;
4412 * release task, there is now a link with the context
4414 if (is_system
== 0 && task
!= current
) {
4418 ret
= pfm_check_task_exist(ctx
);
4420 ctx
->ctx_state
= PFM_CTX_UNLOADED
;
4421 ctx
->ctx_task
= NULL
;
4429 * in this function, we do not need to increase the use count
4430 * for the task via get_task_struct(), because we hold the
4431 * context lock. If the task were to disappear while having
4432 * a context attached, it would go through pfm_exit_thread()
4433 * which also grabs the context lock and would therefore be blocked
4434 * until we are here.
4436 static void pfm_flush_pmds(struct task_struct
*, pfm_context_t
*ctx
);
4439 pfm_context_unload(pfm_context_t
*ctx
, void *arg
, int count
, struct pt_regs
*regs
)
4441 struct task_struct
*task
= PFM_CTX_TASK(ctx
);
4442 struct pt_regs
*tregs
;
4443 int prev_state
, is_system
;
4446 DPRINT(("ctx_state=%d task [%d]\n", ctx
->ctx_state
, task
? task_pid_nr(task
) : -1));
4448 prev_state
= ctx
->ctx_state
;
4449 is_system
= ctx
->ctx_fl_system
;
4452 * unload only when necessary
4454 if (prev_state
== PFM_CTX_UNLOADED
) {
4455 DPRINT(("ctx_state=%d, nothing to do\n", prev_state
));
4460 * clear psr and dcr bits
4462 ret
= pfm_stop(ctx
, NULL
, 0, regs
);
4463 if (ret
) return ret
;
4465 ctx
->ctx_state
= PFM_CTX_UNLOADED
;
4468 * in system mode, we need to update the PMU directly
4469 * and the user level state of the caller, which may not
4470 * necessarily be the creator of the context.
4477 * local PMU is taken care of in pfm_stop()
4479 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE
);
4480 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE
);
4483 * save PMDs in context
4486 pfm_flush_pmds(current
, ctx
);
4489 * at this point we are done with the PMU
4490 * so we can unreserve the resource.
4492 if (prev_state
!= PFM_CTX_ZOMBIE
)
4493 pfm_unreserve_session(ctx
, 1 , ctx
->ctx_cpu
);
4496 * disconnect context from task
4498 task
->thread
.pfm_context
= NULL
;
4500 * disconnect task from context
4502 ctx
->ctx_task
= NULL
;
4505 * There is nothing more to cleanup here.
4513 tregs
= task
== current
? regs
: task_pt_regs(task
);
4515 if (task
== current
) {
4517 * cancel user level control
4519 ia64_psr(regs
)->sp
= 1;
4521 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task
)));
4524 * save PMDs to context
4527 pfm_flush_pmds(task
, ctx
);
4530 * at this point we are done with the PMU
4531 * so we can unreserve the resource.
4533 * when state was ZOMBIE, we have already unreserved.
4535 if (prev_state
!= PFM_CTX_ZOMBIE
)
4536 pfm_unreserve_session(ctx
, 0 , ctx
->ctx_cpu
);
4539 * reset activation counter and psr
4541 ctx
->ctx_last_activation
= PFM_INVALID_ACTIVATION
;
4542 SET_LAST_CPU(ctx
, -1);
4545 * PMU state will not be restored
4547 task
->thread
.flags
&= ~IA64_THREAD_PM_VALID
;
4550 * break links between context and task
4552 task
->thread
.pfm_context
= NULL
;
4553 ctx
->ctx_task
= NULL
;
4555 PFM_SET_WORK_PENDING(task
, 0);
4557 ctx
->ctx_fl_trap_reason
= PFM_TRAP_REASON_NONE
;
4558 ctx
->ctx_fl_can_restart
= 0;
4559 ctx
->ctx_fl_going_zombie
= 0;
4561 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task
)));
4568 * called only from exit_thread(): task == current
4569 * we come here only if current has a context attached (loaded or masked)
4572 pfm_exit_thread(struct task_struct
*task
)
4575 unsigned long flags
;
4576 struct pt_regs
*regs
= task_pt_regs(task
);
4580 ctx
= PFM_GET_CTX(task
);
4582 PROTECT_CTX(ctx
, flags
);
4584 DPRINT(("state=%d task [%d]\n", ctx
->ctx_state
, task_pid_nr(task
)));
4586 state
= ctx
->ctx_state
;
4588 case PFM_CTX_UNLOADED
:
4590 * only comes to this function if pfm_context is not NULL, i.e., cannot
4591 * be in unloaded state
4593 printk(KERN_ERR
"perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task
));
4595 case PFM_CTX_LOADED
:
4596 case PFM_CTX_MASKED
:
4597 ret
= pfm_context_unload(ctx
, NULL
, 0, regs
);
4599 printk(KERN_ERR
"perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task
), state
, ret
);
4601 DPRINT(("ctx unloaded for current state was %d\n", state
));
4603 pfm_end_notify_user(ctx
);
4605 case PFM_CTX_ZOMBIE
:
4606 ret
= pfm_context_unload(ctx
, NULL
, 0, regs
);
4608 printk(KERN_ERR
"perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task
), state
, ret
);
4613 printk(KERN_ERR
"perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task
), state
);
4616 UNPROTECT_CTX(ctx
, flags
);
4618 { u64 psr
= pfm_get_psr();
4619 BUG_ON(psr
& (IA64_PSR_UP
|IA64_PSR_PP
));
4620 BUG_ON(GET_PMU_OWNER());
4621 BUG_ON(ia64_psr(regs
)->up
);
4622 BUG_ON(ia64_psr(regs
)->pp
);
4626 * All memory free operations (especially for vmalloc'ed memory)
4627 * MUST be done with interrupts ENABLED.
4629 if (free_ok
) pfm_context_free(ctx
);
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS	(PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW	(PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE	{ NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
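/*
 * Expansion sketch for one slot: with the PFM_CMD() macro above, entry 1
 * becomes roughly
 *
 *	{ pfm_write_pmcs, "pfm_write_pmcs", PFM_CMD_PCLRWS,
 *	  PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t), NULL }
 *
 * sys_perfmonctl() below indexes this table with the cmd argument, checks
 * cmd_narg against count, sizes its copy_from_user() buffer from
 * cmd_argsize (plus whatever cmd_getsize reports for variable-size
 * commands) and finally dispatches through cmd_func.
 */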
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task  = ctx->ctx_task;

	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task_pid_nr(task),
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (or one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * we are monitoring another thread
	 */
	switch(state) {
		case PFM_CTX_UNLOADED:
			/*
			 * if context is UNLOADED we are safe to go
			 */
			return 0;
		case PFM_CTX_ZOMBIE:
			/*
			 * no command can operate on a zombie context
			 */
			DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
			return -EINVAL;
		case PFM_CTX_MASKED:
			/*
			 * PMU state has been saved to software even though
			 * the thread may still be running.
			 */
			if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if (!task_is_stopped_or_traced(task)) {
			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
			return -EBUSY;
		}
		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There is danger in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task, 0);

		PROTECT_CTX(ctx, flags);

		/*
		 * we must recheck to verify if state has changed
		 */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
/*
 * system-call entry point (must return long)
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct file *file = NULL;
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret; /* will expand int return types */
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE	4096

	/*
	 * reject any call if perfmon was disabled at initialization
	 */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func      = pfm_cmd_tab[cmd].cmd_func;
	narg      = pfm_cmd_tab[cmd].cmd_narg;
	base_sz   = pfm_cmd_tab[cmd].cmd_argsize;
	getsize   = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * check if number of arguments matches what the command expects
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;
	/*
	 * limit abuse to min page size
	 */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
		return -E2BIG;
	}

	/*
	 * allocate default-sized argument buffer
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * check if command supports extra parameters
	 */
	if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		/* retry if necessary */
		if (likely(xtra_sz)) goto restart_args;
	}

	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * check task is stopped
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, task_pt_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy argument back to user, if needed */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (file)
		fput(file);

	kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			//pfm_stop_monitoring(current, regs);
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
static void
pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
{
	int ret;

	DPRINT(("entering for [%d]\n", task_pid_nr(current)));

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
	}

	/*
	 * and wakeup controlling task, indicating we are now disconnected
	 */
	wake_up_interruptible(&ctx->ctx_zombieq);

	/*
	 * given that context is still locked, the controlling
	 * task will only get access when we return from
	 * pfm_handle_work().
	 */
}

static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
 * call may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (pUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
			task_pid_nr(current));
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	regs = task_pt_regs(current);

	/*
	 * extract reason for being here and clear
	 */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/*
	 * must be done before we check for simple-reset mode
	 */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
		goto do_zombie;

	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
	if (reason == PFM_TRAP_REASON_RESET)
		goto skip_blocking;

	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * force interrupt enable because of down_interruptible()
	 */
	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * lock context and mask interrupts again
	 * We save flags into a dummy because we may have
	 * altered the interrupt mask compared to entry in this
	 * function.
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * we need to read the ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change PMD values and therefore
	 * ovfl_regs is reset for these new PMD values.
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}
	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret < 0)
		goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/*
	 * restore flags as they were upon entry
	 */
	UNPROTECT_CTX(ctx, flags);
}
static int
pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		DPRINT(("ignoring overflow notification, owner is zombie\n"));
		return 0;
	}

	DPRINT(("waking up somebody\n"));

	if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);

	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
	kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);

	return 0;
}
static int
pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
{
	pfm_msg_t *msg = NULL;

	if (ctx->ctx_fl_no_msg == 0) {
		msg = pfm_get_new_msg(ctx);
		if (msg == NULL) {
			printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
			return -1;
		}

		msg->pfm_ovfl_msg.msg_type         = PFM_MSG_OVFL;
		msg->pfm_ovfl_msg.msg_ctx_fd       = ctx->ctx_fd;
		msg->pfm_ovfl_msg.msg_active_set   = 0;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
		msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
		msg->pfm_ovfl_msg.msg_tstamp       = 0UL;
	}

	DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd,
		ovfl_pmds));

	return pfm_notify_user(ctx, msg);
}
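
/*
 * Sketch (user level, not kernel code): the monitoring tool typically
 * consumes the message queued above with a read() on the context file
 * descriptor and then re-arms monitoring. Names are those assumed from
 * <asm/perfmon.h>; the exact message layout is defined there. The point
 * is only to show the notify/restart round trip that pfm_handle_work()
 * waits on.
 *
 *	pfm_msg_t msg;
 *	read(ctx_fd, &msg, sizeof(msg));	// blocks until a message is queued
 *	if (msg.pfm_ovfl_msg.msg_type == PFM_MSG_OVFL)
 *		perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);	// completes ctx_restart_done
 */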
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* no leak */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type    = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd  = ctx->ctx_fd;
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
				unsigned long pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/*
	 * sanity test. Should never happen
	 */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp   = ia64_get_itc();
	mask     = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		     "used_pmds=0x%lx\n",
			pmc0,
			task ? task_pid_nr(task): -1,
			(regs ? regs->cr_iip : 0),
			CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
			ctx->ctx_used_pmds[0]));

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd via
		 * pfm_read_pmds().
		 */
		old_val              = new_val = ctx->ctx_pmds[i].val;
		new_val             += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/*
		 * check for overflow condition
		 */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
	if (ovfl_pmds == 0UL) return;

	/*
	 * reset all control bits
	 */
	ovfl_ctrl.val = 0;
	reset_pmds    = 0UL;

	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			ovfl_arg->ovfl_pmd      = (unsigned char )i;
			ovfl_arg->ovfl_notify   = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set    = 0;
			ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
			ovfl_arg->smpl_pmds[0]  = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value      = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid    = ctx->ctx_pmds[i].eventid;

			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/*
			 * call custom buffer format record (handler) routine
			 */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
			ovfl_ctrl.bits.notify_user     |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task      |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
			/*
			 * build the bitmask of pmds to reset now
			 */
			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}
		/*
		 * when the module cannot handle the rest of the overflows, we abort right here
		 */
		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}
		/*
		 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
		 */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
		ovfl_ctrl.bits.notify_user     = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task      = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
		ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
		/*
		 * if needed, we reset all overflowed pmds
		 */
		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/*
	 * reset the requested PMD registers using the short reset values
	 */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/*
		 * keep track of what to reset when unblocking
		 */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/*
		 * check for blocking context
		 */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			/*
			 * set the perfmon specific checking pending work for the task
			 */
			PFM_SET_WORK_PENDING(task, 1);

			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
			set_notify_resume(task);
		}
		/*
		 * defer until state is changed (shorten spin window). the context is locked
		 * anyway, so the signal receiver would come spin for nothing.
		 */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
			GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
			PFM_GET_WORK_PENDING(task),
			ctx->ctx_fl_trap_reason,
			ovfl_pmds,
			ovfl_notify,
			ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
	/*
	 * in case monitoring must be stopped, we toggle the psr bits
	 */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	/*
	 * send notification now
	 */
	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
			smp_processor_id(),
			task ? task_pid_nr(task) : -1,
			pmc0);
	return;

stop_monitoring:
	/*
	 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
	 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
	 * come here as zombie only if the task is the current task. In which case, we
	 * can access the PMU hardware directly.
	 *
	 * Note that zombies do have PM_VALID set. So here we do the minimal.
	 *
	 * In case the context was zombified it could not be reclaimed at the time
	 * the monitoring program exited. At this point, the PMU reservation has been
	 * returned, the sampling buffer has been freed. We must convert this call
	 * into a spurious interrupt. However, we must also avoid infinite overflows
	 * by stopping monitoring for this task. We can only come here for a per-task
	 * context. All we need to do is to stop monitoring using the psr bits which
	 * are always task private. By re-enabling secure monitoring, we ensure that
	 * the monitored task will not be able to re-activate monitoring.
	 * The task will eventually be context switched out, at which point the context
	 * will be reclaimed (that includes releasing ownership of the PMU).
	 *
	 * So there might be a window of time where the number of per-task sessions is zero
	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
	 * context. This is safe because if a per-task session comes in, it will push this one
	 * out and by virtue of pfm_save_regs(), this one will disappear. If a system wide
	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
	 * also push our zombie context out.
	 *
	 * Overall pretty hairy stuff....
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
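
/*
 * Worked example for the bitmask logic above (sketch; numbers assume a PMU
 * where the first counting register is PMD4, i.e. PMU_FIRST_COUNTER == 4):
 * if PMD4 and PMD6 wrapped, the PMU sets bits 4 and 6 of PMC0 plus the
 * freeze bit, so pmc0 = 0x51 and
 *
 *	mask = pmc0 >> PMU_FIRST_COUNTER;	// 0x5: bit0 -> PMD4, bit2 -> PMD6
 *
 * The update loop then bumps ctx_pmds[4].val and ctx_pmds[6].val by
 * (1 + ovfl_val), and ovfl_pmds ends up as (1UL<<4)|(1UL<<6) = 0x50.
 */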
static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep it unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu();
	return IRQ_HANDLED;
}
/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)(long)nr_cpu_ids+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= nr_cpu_ids) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version           : %u.%u\n"
		"model                     : %s\n"
		"fastctxsw                 : %s\n"
		"expert mode               : %s\n"
		"ovfl_mask                 : 0x%lx\n"
		"PMU flags                 : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions             : %u\n"
		"sys_sessions              : %u\n"
		"sys_use_dbregs            : %u\n"
		"ptrace_use_dbregs         : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs      : %lu\n"
		"CPU%-2d overflow cycles     : %lu\n"
		"CPU%-2d overflow min        : %lu\n"
		"CPU%-2d overflow max        : %lu\n"
		"CPU%-2d smpl handler calls  : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs      : %lu\n"
		"CPU%-2d replay   intrs      : %lu\n"
		"CPU%-2d syst_wide           : %d\n"
		"CPU%-2d dcr_pp              : %d\n"
		"CPU%-2d exclude idle        : %d\n"
		"CPU%-2d owner               : %d\n"
		"CPU%-2d context             : %p\n"
		"CPU%-2d activations         : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid : -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr                 : 0x%lx\n"
			"CPU%-2d pmc0                : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u                : 0x%lx\n"
				"CPU%-2d pmd%u                : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

const struct seq_operations pfm_seq_ops = {
	.start =	pfm_proc_start,
	.next =		pfm_proc_next,
	.stop =		pfm_proc_stop,
	.show =		pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
/*
 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
 * is active or inactive based on mode. We must rely on the value in
 * local_cpu_data->pfm_syst_info
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on every CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out
		 * restore monitoring for next task
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);

		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMD as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because, it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 *
	 * XXX: I don't think this is necessary
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
	 * it is needed to check for pending overflow
	 * on the restore path
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * now we can unmask PMU interrupts, they will
	 * be treated as purely spurious and we will not
	 * lose any information
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling) so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid picking
		 * up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when context is MASKED, we will restore PMC with plm=0
	 * and PMD with stale information, but that's ok, nothing
	 * will be captured.
	 *
	 * XXX: optimize here
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * dump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /*  !CONFIG_SMP */
/*
 * reload PMU state for UP kernels
 * in 2.5 we come here with interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path, our state is still there, just
	 * need to restore psr and we go
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow_handler. So we are safe w.r.t. to interrupt
	 * concurrency even without interrupt masking.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff !
	 *
	 * Upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling) so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid picking
	 * up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information
		 * On McKinley PMU, this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership.
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit. measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * can access PMU if task is the owner of the PMU state on the current CPU
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (that is not necessarily the task the context is attached to in this mode).
	 * In system-wide we always have can_access_pmu true because a task running on an
	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned
		 * This will cause the interrupt handler to do nothing in case an overflow
		 * interrupt was in-flight
		 * This also guarantees that pmc0 will contain the final state
		 * It virtually gives us full control on overflow processing from that point
		 * on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 *
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * we take care of overflows for counting PMDs
	 *
	 * XXX: sampling situation is not taken into account here
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip non used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can access PMU always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
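
/*
 * Numeric sketch for the rebuild above (values assumed, not taken from a
 * real trace): with a 47-bit hardware counter, pmu_conf->ovfl_val is
 * 2^47 - 1. If the software part ctx_pmds[i].val already holds 3*(2^47)
 * and the hardware PMD reads 0x1234, then
 *
 *	val = ctx_pmds[i].val + (pmd & ovfl_val) = 3*2^47 + 0x1234
 *
 * and if PMC0 still flags a not-yet-processed overflow for that PMD, the
 * "+ 1 + ovfl_val" above adds one more 2^47 period, giving the full 64-bit
 * count that pfm_read_pmds() will report.
 */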
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = IRQF_DISABLED,
	.name    = "perfmon"
};
static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required
	 * May cause a spurious interrupt on some processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

static void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in state expected
	 * by perfmon ctxsw
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
/*
 * perfmon initialization routine, called from the initcall() table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while(*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}
static const struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
			local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}

__initcall(pfm_init);
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time=1;
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
/*
 * called from process.c:copy_thread(). task is new child.
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_threads()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */