cfq-iosched: fix rcu freeing of cfq io contexts
[deliverable/linux.git] / include / asm-x86 / desc.h
1 #ifndef _ASM_DESC_H_
2 #define _ASM_DESC_H_
3
4 #ifndef __ASSEMBLY__
5 #include <asm/desc_defs.h>
6 #include <asm/ldt.h>
7 #include <asm/mmu.h>
8 #include <linux/smp.h>
9
/*
 * Fill an 8-byte segment descriptor from the user-supplied user_desc
 * (the modify_ldt()/set_thread_area() ABI structure).  DPL is forced
 * to 3 because these descriptors are only ever used from user space.
 */
static inline void fill_ldt(struct desc_struct *desc,
			    const struct user_desc *info)
{
	desc->limit0 = info->limit & 0x0ffff;		/* limit bits 15..0 */
	desc->base0 = info->base_addr & 0x0000ffff;	/* base bits 15..0 */

	desc->base1 = (info->base_addr & 0x00ff0000) >> 16; /* base bits 23..16 */
	/* ABI flag has inverted sense: read_exec_only == 0 means writable/readable */
	desc->type = (info->read_exec_only ^ 1) << 1;
	desc->type |= info->contents << 2;
	desc->s = 1;			/* code/data segment, not a system segment */
	desc->dpl = 0x3;		/* user privilege */
	desc->p = info->seg_not_present ^ 1;	/* present bit, inverted ABI sense */
	desc->limit = (info->limit & 0xf0000) >> 16;	/* limit bits 19..16 */
	desc->avl = info->useable;
	desc->d = info->seg_32bit;	/* default operation size: 1 = 32 bit */
	desc->g = info->limit_in_pages;	/* granularity: 1 = limit in 4k pages */
	desc->base2 = (info->base_addr & 0xff000000) >> 24; /* base bits 31..24 */
}
28
/* The IDT is shared by all CPUs; idt_descr is the base/limit pair for lidt. */
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];

#ifdef CONFIG_X86_64
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
extern struct desc_ptr cpu_gdt_descr[];
/* the cpu gdt accessor */
#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address)
37
38 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
39 unsigned dpl, unsigned ist, unsigned seg)
40 {
41 gate->offset_low = PTR_LOW(func);
42 gate->segment = __KERNEL_CS;
43 gate->ist = ist;
44 gate->p = 1;
45 gate->dpl = dpl;
46 gate->zero0 = 0;
47 gate->zero1 = 0;
48 gate->type = type;
49 gate->offset_middle = PTR_MIDDLE(func);
50 gate->offset_high = PTR_HIGH(func);
51 }
52
#else
/* 32 bit: each CPU has its own page-aligned copy of the GDT. */
struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU(struct gdt_page, gdt_page);
58
/* Return the base of @cpu's GDT (32 bit: its per-cpu gdt_page copy). */
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}
63
/*
 * Pack a 32-bit gate descriptor: word 'a' carries the selector and the
 * low 16 bits of the offset/base; word 'b' carries the high bits plus
 * present (0x80), DPL and type.  'flags' is not used by this variant.
 */
static inline void pack_gate(gate_desc *gate, unsigned char type,
	unsigned long base, unsigned dpl, unsigned flags, unsigned short seg)

{
	gate->a = (seg << 16) | (base & 0xffff);
	gate->b = (base & 0xffff0000) |
		  (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

#endif
74
75 static inline int desc_empty(const void *ptr)
76 {
77 const u32 *desc = ptr;
78 return !(desc[0] | desc[1]);
79 }
80
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/*
 * Native (non-paravirt) build: descriptor-table operations map straight
 * onto the native_* inlines below, or onto the raw instructions.
 */
#define load_TR_desc() native_load_tr_desc()
#define load_gdt(dtr) native_load_gdt(dtr)
#define load_idt(dtr) native_load_idt(dtr)
#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))

#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt

#define write_ldt_entry(dt, entry, desc) \
	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type) \
	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
#endif
104
/* Copy one gate descriptor into IDT slot @entry. */
static inline void native_write_idt_entry(gate_desc *idt, int entry,
					  const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}
110
111 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
112 const void *desc)
113 {
114 memcpy(&ldt[entry], desc, 8);
115 }
116
117 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
118 const void *desc, int type)
119 {
120 unsigned int size;
121 switch (type) {
122 case DESC_TSS:
123 size = sizeof(tss_desc);
124 break;
125 case DESC_LDT:
126 size = sizeof(ldt_desc);
127 break;
128 default:
129 size = sizeof(struct desc_struct);
130 break;
131 }
132 memcpy(&gdt[entry], desc, size);
133 }
134
/*
 * Pack base/limit/type/flags into the two 32-bit words of a segment
 * descriptor (32-bit layout), then mark it present via the bitfield
 * view of the same storage.
 */
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
				   unsigned long limit, unsigned char type,
				   unsigned char flags)
{
	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
		  (limit & 0x000f0000) | ((type & 0xff) << 8) |
		  ((flags & 0xf) << 20);
	desc->p = 1;	/* present */
}
145
146
/*
 * Build a TSS or LDT (system) descriptor for base @addr with limit
 * @size.  64 bit uses the 16-byte ldttss_desc64 layout; 32 bit reuses
 * pack_descriptor() with the present bit (0x80) folded into the type.
 */
static inline void set_tssldt_descriptor(void *d, unsigned long addr,
					 unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
	struct ldttss_desc64 *desc = d;
	memset(desc, 0, sizeof(*desc));
	desc->limit0 = size & 0xFFFF;
	desc->base0 = PTR_LOW(addr);
	desc->base1 = PTR_MIDDLE(addr) & 0xFF;
	desc->type = type;
	desc->p = 1;		/* present */
	desc->limit1 = (size >> 16) & 0xF;
	desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
	desc->base3 = PTR_HIGH(addr);
#else

	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
166
/*
 * Install the TSS descriptor for @cpu at GDT slot @entry, covering the
 * tss_struct at @addr including its trailing I/O permission bitmap.
 */
static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_table(cpu);
	tss_desc tss;

	/*
	 * sizeof(unsigned long) coming from an extra "long" at the end
	 * of the iobitmap. See tss_struct definition in processor.h
	 *
	 * -1? seg base+limit should be pointing to the address of the
	 * last valid byte
	 */
	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
		IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
185
186 static inline void native_set_ldt(const void *addr, unsigned int entries)
187 {
188 if (likely(entries == 0))
189 __asm__ __volatile__("lldt %w0"::"q" (0));
190 else {
191 unsigned cpu = smp_processor_id();
192 ldt_desc ldt;
193
194 set_tssldt_descriptor(&ldt, (unsigned long)addr,
195 DESC_LDT, entries * sizeof(ldt) - 1);
196 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
197 &ldt, DESC_LDT);
198 __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
199 }
200 }
201
/* Load the task register with this CPU's TSS selector (ltr). */
static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

/* Load the GDT base/limit from @dtr (lgdt). */
static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

/* Load the IDT base/limit from @dtr (lidt). */
static inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

/* Store the current GDT base/limit into @dtr (sgdt). */
static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

/* Store the current IDT base/limit into @dtr (sidt). */
static inline void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}

/* Return the current task register selector (str). */
static inline unsigned long native_store_tr(void)
{
	unsigned long tr;
	asm volatile("str %0":"=r" (tr));
	return tr;
}
233
/*
 * Copy the thread's TLS descriptors into @cpu's GDT so the task's TLS
 * segment selectors resolve after a context switch.
 */
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	unsigned int i;
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
242
/*
 * A user_desc describes an "empty" (cleared) LDT slot when every field
 * matches the canonical not-present descriptor.  On 64 bit the 'lm'
 * (long mode) bit must be clear as well.
 */
#define _LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0)

#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
#else
#define LDT_empty(info) (_LDT_empty(info))
#endif
258
/* Deactivate the LDT: load a null LDT pointer. */
static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}
263
/*
 * load one particular LDT into the current CPU.
 * No preemption protection -- the caller must ensure the task cannot
 * migrate while this runs (see load_LDT() for the safe wrapper).
 */
static inline void load_LDT_nolock(mm_context_t *pc)
{
	set_ldt(pc->ldt, pc->size);
}
271
/* Preemption-safe wrapper around load_LDT_nolock(). */
static inline void load_LDT(mm_context_t *pc)
{
	preempt_disable();
	load_LDT_nolock(pc);
	preempt_enable();
}
278
/* Reassemble the 32-bit segment base from its three descriptor fields. */
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
}
283
/* Reassemble the 20-bit segment limit from its two descriptor fields. */
static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit << 16);
}
288
/*
 * Build a gate descriptor for @addr and install it at IDT vector @gate.
 * pack_gate() ignores 'seg' on 64 bit and the ist/flags slot on 32 bit.
 */
static inline void _set_gate(int gate, unsigned type, void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;
	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	/*
	 * does not need to be atomic because it is only done once at
	 * setup time
	 */
	write_idt_entry(idt_table, gate, &s);
}
300
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
/* Install a kernel-only (DPL 0) interrupt gate at vector @n. */
static inline void set_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);	/* vector must fit the 256-entry IDT */
	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
}
312
/*
 * This routine sets up an interrupt gate at directory privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}

/* Install a kernel-only trap gate at vector @n. */
static inline void set_trap_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}

/*
 * Userspace-callable (DPL 3) gate: a trap gate on 32 bit, an
 * interrupt gate on 64 bit.
 */
static inline void set_system_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
#ifdef CONFIG_X86_32
	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
#else
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
#endif
}

/* Install a task gate at vector @n selecting GDT descriptor @gdt_entry. */
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}

/* Kernel-only interrupt gate using Interrupt Stack Table slot @ist. */
static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}

/* As set_intr_gate_ist() but callable from user mode (DPL 3). */
static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
355
#else
/*
 * GET_DESC_BASE reads the descriptor base of the specified segment.
 *
 * Args:
 *    idx - descriptor index
 *    gdt - GDT pointer
 *   base - 32bit register to which the base will be written
 *   lo_w - lo word of the "base" register
 *   lo_b - lo byte of the "base" register
 *   hi_b - hi byte of the low word of the "base" register
 *
 * Example:
 *    GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
 *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
 *
 * Note the order: base bits 23..16 and 31..24 are assembled in the low
 * word first, shifted up by 16, then bits 15..0 fill the low word.
 */
#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
	movb idx*8+4(gdt), lo_b; \
	movb idx*8+7(gdt), hi_b; \
	shll $16, base; \
	movw idx*8+2(gdt), lo_w;


#endif /* __ASSEMBLY__ */
380
381 #endif
This page took 0.040764 seconds and 5 git commands to generate.