powerpc/pseries: Inform the hypervisor we are using EBB regs
[deliverable/linux.git] / arch/powerpc/include/asm/lppaca.h
/*
 * lppaca.h
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__

/*
 * These definitions relate to hypervisors that only exist when using
 * a server type processor
 */
#ifdef CONFIG_PPC_BOOK3S

/*
 * This control block contains the data that is shared between the
 * hypervisor and the OS.
 */
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/types.h>
#include <asm/mmu.h>

/*
 * We only have to have statically allocated lppaca structs on
 * legacy iSeries, which supports at most 64 cpus.
 */
#define NR_LPPACAS	1

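/*
 * Illustrative sketch (not part of this header): arch/powerpc/kernel/paca.c
 * defines the statically allocated lppacas along these lines; the field
 * values shown below are examples, not authoritative:
 *
 *	struct lppaca lppaca[] = {
 *		[0 ... (NR_LPPACAS - 1)] = {
 *			.desc = 0xd397d781,	// "LpPa" eye catcher (EBCDIC)
 *			.size = sizeof(struct lppaca),
 *			.fpregs_in_use = 1,
 *			.slb_count = 64,
 *		},
 *	};
 */
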
/*
 * The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
 * alignment is sufficient to prevent this
 */
struct lppaca {
	/* cacheline 1 contains read-only data */

	u32	desc;			/* Eye catcher 0xD397D781 */
	u16	size;			/* Size of this struct */
	u16	reserved1;
	u16	reserved2:14;
	u8	shared_proc:1;		/* Shared processor indicator */
	u8	secondary_thread:1;	/* Secondary thread indicator */
	u8	reserved3[14];
	volatile u32 dyn_hw_node_id;	/* Dynamic hardware node id */
	volatile u32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
	u8	reserved4[56];
	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
					  /* associativity change counters */
	u8	reserved5[32];

	/* cacheline 2 contains local read-write data */

	u8	reserved6[48];
	u8	cede_latency_hint;
	u8	ebb_regs_in_use;
	u8	reserved7[6];
	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
	u8	fpregs_in_use;
	u8	pmcregs_in_use;
	u8	reserved8[28];
	u64	wait_state_cycles;	/* Wait cycles for this proc */
	u8	reserved9[28];
	u16	slb_count;		/* # of SLBs to maintain */
	u8	idle;			/* Indicate OS is idle */
	u8	vmxregs_in_use;

	/* cacheline 3 is shared with other processors */

	/*
	 * This is the yield_count. An "odd" value (low bit on) means that
	 * the processor is yielded (either because of an OS yield or a
	 * hypervisor preempt). An even value implies that the processor is
	 * currently executing.
	 * NOTE: This value will ALWAYS be zero for dedicated processors and
	 * will NEVER be zero for shared processors (ie, initialized to a 1).
	 */
	volatile u32 yield_count;
	volatile u32 dispersion_count;	/* dispatch changed physical cpu */
	volatile u64 cmo_faults;	/* CMO page fault count */
	volatile u64 cmo_fault_time;	/* CMO page fault time */
	u8	reserved10[104];

	/* cacheline 4-5 */

	u32	page_ins;		/* CMO Hint - # page ins by OS */
	u8	reserved11[148];
	volatile u64 dtl_idx;		/* Dispatch Trace Log head index */
	u8	reserved12[96];
} __attribute__((__aligned__(0x400)));
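
/*
 * Example (hedged sketch, not part of this header): shared-processor
 * lock code can use the parity of yield_count to tell whether the lock
 * holder's virtual processor has been preempted before conferring
 * cycles to it, along the lines of __spin_yield() in
 * arch/powerpc/lib/locks.c:
 *
 *	u32 yield_count = lppaca_of(holder_cpu).yield_count;
 *	if ((yield_count & 1) == 0)
 *		return;		// holder is running; keep spinning
 *	rmb();			// order the count read before re-checking the lock
 *	plpar_hcall_norets(H_CONFER,
 *			   get_hard_smp_processor_id(holder_cpu), yield_count);
 */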

extern struct lppaca lppaca[];

#define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)

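/*
 * Example (hedged sketch, not part of this header): flags such as
 * ebb_regs_in_use, fpregs_in_use, vmxregs_in_use and pmcregs_in_use let
 * the OS tell the hypervisor which register state it is actually using.
 * Assuming the get_lppaca() accessor from asm/paca.h, informing the
 * hypervisor that the Event-Based Branch registers are now in use is
 * just:
 *
 *	get_lppaca()->ebb_regs_in_use = 1;
 */
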
/*
 * SLB shadow buffer structure as defined in the PAPR. The save_area
 * contains adjacent ESID and VSID pairs for each shadowed SLB. The
 * ESID is stored in the lower 64bits, then the VSID.
 */
struct slb_shadow {
	u32	persistent;		/* Number of persistent SLBs */
	u32	buffer_length;		/* Total shadow buffer length */
	u64	reserved;
	struct {
		u64	esid;
		u64	vsid;
	} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;

extern struct slb_shadow slb_shadow[];

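/*
 * Hedged sketch (not part of this header): when a bolted SLB entry is
 * rewritten, arch/powerpc/mm/slb.c updates the shadow by invalidating
 * the ESID first, so the hypervisor never sees a valid ESID paired with
 * a stale VSID. Roughly (mk_esid_data()/mk_vsid_data() are slb.c
 * helpers, get_slb_shadow() comes from asm/paca.h):
 *
 *	get_slb_shadow()->save_area[entry].esid = 0;
 *	get_slb_shadow()->save_area[entry].vsid =
 *				mk_vsid_data(ea, ssize, flags);
 *	get_slb_shadow()->save_area[entry].esid =
 *				mk_esid_data(ea, ssize, entry);
 */
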
/*
 * Layout of entries in the hypervisor's dispatch trace log buffer.
 */
struct dtl_entry {
	u8	dispatch_reason;
	u8	preempt_reason;
	u16	processor_id;
	u32	enqueue_to_dispatch_time;
	u32	ready_to_enqueue_time;
	u32	waiting_to_ready_time;
	u64	timebase;
	u64	fault_addr;
	u64	srr0;
	u64	srr1;
};

#define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
#define N_DISPATCH_LOG		(DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))

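/*
 * Hedged sketch (not part of this header): dtl_idx in the lppaca counts
 * every entry the hypervisor has ever written, so a reader keeps its own
 * running index and maps it into the per-cpu ring with % N_DISPATCH_LOG.
 * The names below (dtl_buf, last_idx, cpu) are illustrative only:
 *
 *	u64 i = last_idx;
 *	while (i < lppaca_of(cpu).dtl_idx) {
 *		struct dtl_entry *dte = dtl_buf + (i % N_DISPATCH_LOG);
 *		// consume *dte; entries can be overwritten if the reader
 *		// falls more than N_DISPATCH_LOG entries behind
 *		i++;
 *	}
 *	last_idx = i;
 */
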
extern struct kmem_cache *dtl_cache;

/*
 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
 * reading from the dispatch trace log. If other code wants to consume
 * DTL entries, it can set this pointer to a function that will get
 * called once for each DTL entry that gets processed.
 */
extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);

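/*
 * Illustrative sketch (not part of this header): a secondary consumer,
 * such as the pseries dispatch-trace-log debugfs code, hooks in by
 * pointing dtl_consumer at its own callback. my_dtl_consumer below is a
 * hypothetical name:
 *
 *	static void my_dtl_consumer(struct dtl_entry *dte, u64 index)
 *	{
 *		// record or trace the entry
 *	}
 *	...
 *	dtl_consumer = my_dtl_consumer;
 */
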
#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */