[PARISC] use less assembler statements in syscall path
[deliverable/linux.git] / arch / parisc / kernel / entry.S
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>

#ifdef CONFIG_64BIT
#define CMPIB		cmpib,*
#define CMPB		cmpb,*
#define COND(x)		*x

	.level 2.0w
#else
#define CMPIB		cmpib,
#define CMPB		cmpb,
#define COND(x)		x

	.level 2.0
#endif

	.import		pa_dbit_lock,data

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
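
	/* Illustrative sketch only (an added note, not part of the build):
	 * in C terms, the SPACEID_SHIFT == 0 variant computes roughly
	 *	prot = (spc & 0x7fffffffUL) << 1;
	 * and the extrd,u variant roughly
	 *	prot = (spc >> (SPACEID_SHIFT - 1)) & 0xffffffffUL;
	 * i.e. the space id is moved into the protection-id position. */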

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mfsp	%sr7, %r1
	or,=	%r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
	mtsp	%r1, %sr3
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * For Faults:
	 *	If sr7 == 0
	 *		Already using a kernel stack, so call the
	 *		get_stack_use_r30 macro to push a pt_regs structure
	 *		on the stack, and store registers there.
	 *	else
	 *		Need to set up a kernel stack, so call the
	 *		get_stack_use_cr30 macro to set up a pointer
	 *		to the pt_regs structure contained within the
	 *		task pointer pointed to by cr30. Set the stack
	 *		pointer to point to the end of the task structure.
	 *
	 * For Interrupts:
	 *	If sr7 == 0
	 *		Already using a kernel stack, check to see if r30
	 *		is already pointing to the per processor interrupt
	 *		stack. If it is, call the get_stack_use_r30 macro
	 *		to push a pt_regs structure on the stack, and store
	 *		registers there. Otherwise, call get_stack_use_cr31
	 *		to get a pointer to the base of the interrupt stack
	 *		and push a pt_regs structure on that stack.
	 *	else
	 *		Need to set up a kernel stack, so call the
	 *		get_stack_use_cr30 macro to set up a pointer
	 *		to the pt_regs structure contained within the
	 *		task pointer pointed to by cr30. Set the stack
	 *		pointer to point to the end of the task structure.
	 *		N.B: We don't use the interrupt stack for the
	 *		first interrupt from userland, because signals/
	 *		resched's are processed when returning to userland,
	 *		and we can sleep in those cases.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */
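
	/* A compact sketch of the above policy (illustrative pseudocode
	 * only, nothing here is assembled):
	 *
	 *	if (sr7 == 0)			// taken while in the kernel
	 *		get_stack_use_r30	// push pt_regs on current stack
	 *	else				// came from userland
	 *		get_stack_use_cr30	// pt_regs lives in task struct
	 *
	 * with interrupts additionally preferring the per-cpu interrupt
	 * stack via cr31 when that support is enabled. */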

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	mfctl	%cr30, %r1
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r30, PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	STREG	%r30,PT_GR30(%r9)
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	PA(os_hpmc)	/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	itlb_miss_11
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 *
	 * Note: naitlb misses will be treated
	 * as an ordinary itlb miss for now.
	 * However, note that naitlb misses
	 * have the faulting address in the
	 * IOR/ISR.
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%ior,va
	/* FIXME: If user causes a naitlb miss, the priv level may not be in
	 * lower bits of va, where the itlb miss handler is expecting them
	 */

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* The following are simple 32 vs 64 bit instruction
	 * abstractions for the macros */
	.macro	EXTR reg1,start,length,reg2
#ifdef CONFIG_64BIT
	extrd,u	\reg1,32+\start,\length,\reg2
#else
	extrw,u	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEP reg1,start,length,reg2
#ifdef CONFIG_64BIT
	depd	\reg1,32+\start,\length,\reg2
#else
	depw	\reg1,\start,\length,\reg2
#endif
	.endm

	.macro	DEPI val,start,length,reg
#ifdef CONFIG_64BIT
	depdi	\val,32+\start,\length,\reg
#else
	depwi	\val,\start,\length,\reg
#endif
	.endm
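
	/* Added note on the "32+" above: the word forms (extrw/depw) number
	 * bit positions 0..31 from the left, while the doubleword forms
	 * (extrd/depd) number 0..63. Adding 32 keeps a field position
	 * written for 32-bit code pointing at the same low-order bits of
	 * the 64-bit register. */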

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address. We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm

	.import swapper_pg_dir,code

	/* Get the pgd. For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir. For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm

	/*
	 * space_check(spc,tmp,fault)
	 *
	 * spc   - The space we saw the fault with.
	 * tmp   - The place to store the current space.
	 * fault - Function to call on failure.
	 *
	 * Only allow faults on different spaces from the
	 * currently active one if we're the kernel
	 */
	.macro	space_check spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm
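
	/* Net effect, as C-style pseudocode (illustrative only):
	 *
	 *	tmp = (spc == 0) ? 0 : mfsp(%sr7);
	 *	if (tmp != 0 && tmp != spc)
	 *		goto fault;
	 *
	 * i.e. faults on space zero (e.g. the gateway page) always pass,
	 * kernel mode (%sr7 == 0) may fault on any space, and user faults
	 * must match the current space in %sr7. */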

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if PT_NLEVELS == 3
	EXTR	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
	EXTR	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	DEP	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	copy	\pmd,%r9
	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	EXTR	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	DEP	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
	LDREG	%r0(\pmd),\pte		/* pmd is now pte */
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm
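
	/* Rough C equivalent of the walk above (illustrative sketch only;
	 * assumes the 2-level case, with generic names):
	 *
	 *	pmd = pmd_base[(va >> PMD_SHIFT) & pmd_index_mask];
	 *	if (!(pmd & _PxD_PRESENT))
	 *		goto fault;
	 *	pte = pte_table(pmd)[(va >> PAGE_SHIFT) & pte_index_mask];
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */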

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd. This means that we can
	 * subtract a constant offset to get to it. The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_ptep	ptep,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>) \tmp1,\pte,%r0
	STREG	\tmp,0(\ptep)
	.endm
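
	/* i.e., illustratively:
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptep = pte | _PAGE_ACCESSED;
	 * the and,COND(<>) nullifies the store when the bit was already
	 * set, avoiding the needless cache-line dirtying noted above. */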

	/* Set the dirty bit (and accessed bit). No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptep,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptep)
	.endm

	/* Convert the pte and prot to tlb insertion values. How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety. This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	_PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
	extru	\pte,24,25,\pte
	.endm

	/* This is for ILP32 PA2.0 only. The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
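
	/* Roughly (an illustrative example, not normative): if the top
	 * nibble of the 32-bit physical address is 0xf, the f's are
	 * extended into the upper word, e.g. 0xf0001000 becomes
	 * 0x...fffff0001000, placing the translation in I/O space;
	 * other addresses are left unchanged. */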

	/* The alias region is an 8MB aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	DEPI	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	ldi	(_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
	depd,z	\prot,8,7,\prot
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Align fault_vector_20 on 4K boundary so that both
	 * fault_vector_11 and fault_vector_20 are on the
	 * same page. This is only necessary as long as we
	 * write protect the kernel text, which we may stop
	 * doing once we use large page translations to cover
	 * the static part of the kernel address space.
	 */

	.export fault_vector_20

	.text

	.align 4096

fault_vector_20:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc	 1
	def	 2
	def	 3
	extint	 4
	def	 5
	itlb_20	 6
	def	 7
	def	 8
	def	 9
	def	10
	def	11
	def	12
	def	13
	def	14
	dtlb_20	15
#if 0
	naitlb_20 16
#else
	def	16
#endif
	nadtlb_20 17
	def	18
	def	19
	dbit_20	20
	def	21
	def	22
	def	23
	def	24
	def	25
	def	26
	def	27
	def	28
	def	29
	def	30
	def	31

#ifndef CONFIG_64BIT

	.export fault_vector_11

	.align 2048

fault_vector_11:
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc	 1
	def	 2
	def	 3
	extint	 4
	def	 5
	itlb_11	 6
	def	 7
	def	 8
	def	 9
	def	10
	def	11
	def	12
	def	13
	def	14
	dtlb_11	15
#if 0
	naitlb_11 16
#else
	def	16
#endif
	nadtlb_11 17
	def	18
	def	19
	dbit_11	20
	def	21
	def	22
	def	23
	def	24
	def	25
	def	26
	def	27
	def	28
	def	29
	def	30
	def	31

#endif

	.import	handle_interruption,code
	.import	do_cpu_irq_mask,code

	/*
	 * r26 = function to be called
	 * r25 = argument to pass in
	 * r24 = flags for do_fork()
	 *
	 * Kernel threads don't ever return, so they don't need
	 * a true register context. We just save away the arguments
	 * for copy_thread/ret_ to properly set up the child.
	 */

#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000

	.export __kernel_thread, code
	.import do_fork
__kernel_thread:
	STREG	%r2, -RP_OFFSET(%r30)

	copy	%r30, %r1
	ldo	PT_SZ_ALGN(%r30),%r30
#ifdef CONFIG_64BIT
	/* Yo, function pointers in wide mode are little structs... -PB */
	ldd	24(%r26), %r2
	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
	ldd	16(%r26), %r26

	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
	copy	%r0, %r22		/* user_tid */
#endif
	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
	STREG	%r25, PT_GR25(%r1)
	ldil	L%CLONE_UNTRACED, %r26
	ldo	CLONE_VM(%r26), %r26	/* Force CLONE_VM since only init_mm */
	or	%r26, %r24, %r26	/* will have kernel mappings.	   */
	ldi	1, %r25			/* stack_start, signals kernel thread */
	stw	%r0, -52(%r30)		/* user_tid */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	do_fork, %r2
	copy	%r1, %r24		/* pt_regs */

	/* Parent Returns here */

	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
	ldo	-PT_SZ_ALGN(%r30), %r30
	bv	%r0(%r2)
	nop

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args from temp save area set up above
	 * into task save area.
	 */

	.export	ret_from_kernel_thread
ret_from_kernel_thread:

	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
	LDREG	TASK_PT_GR22(%r1), %r22
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
	loadgp				/* Thread could have been in a module */
#endif
#ifndef CONFIG_64BIT
	b	sys_exit
#else
	load32	sys_exit, %r1
	bv	%r0(%r1)
#endif
	ldi	0, %r26

	.import	sys_execve, code
	.export	__execve, code
__execve:
	copy	%r2, %r15
	copy	%r30, %r16
	ldo	PT_SZ_ALGN(%r30), %r30
	STREG	%r26, PT_GR26(%r16)
	STREG	%r25, PT_GR25(%r16)
	STREG	%r24, PT_GR24(%r16)
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	sys_execve, %r2
	copy	%r16, %r26

	cmpib,=,n 0,%r28,intr_return	/* forward */

	/* yes, this will trap and die. */
	copy	%r15, %r2
	copy	%r16, %r30
	bv	%r0(%r2)
	nop

	.align 4

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
	.export	_switch_to, code
_switch_to:
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

_switch_to_ret:
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used. All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align 4096

	.export syscall_exit_rfi
syscall_exit_rfi:
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

intr_return:
	/* NOTE: Need to enable interrupts in case we schedule. */
	ssm	PSW_SM_I, %r0

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19
#ifdef CONFIG_SMP
	mfctl	%cr30,%r1
	ldw	TI_CPU(%r1),%r1 /* get cpu # - int */
	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
	** irq_stat[] is defined using ____cacheline_aligned.
	*/
	SHLREG	%r1,L1_CACHE_SHIFT,%r20
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

intr_check_resched:

	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=,n 0,%r20,intr_restore /* backward */

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy	%r16,%r29
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1 %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials %r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	CMPIB=	0, %r20, intr_do_preempt
	nop

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	CMPIB<>	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b,n 3f

1:
#if 0  /* Interrupt Stack support not working yet! */
	mfctl	%cr31,%r1
	copy	%r30,%r17
	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
	DEPI	0,31,15,%r17
	CMPB=,n	%r1,%r17,2f
	get_stack_use_cr31
	b,n 3f
#endif
2:
	get_stack_use_r30

3:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

	.export	intr_save, code /* for os_hpmc */

intr_save:
	mfsp    %sr7,%r16
	CMPIB=,n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */

	/*
	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
	 *	     traps.c).
	 *	  2) Once we start executing code above 4 Gb, we need
	 *	     to adjust iasq/iaoq here in the same way we
	 *	     adjust isr/ior below.
	 */

	CMPIB=,n        6,%r26,skip_save_ior


	mfctl           %cr20, %r16 /* isr */
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %cr21, %r17 /* ior */


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/*
	 * FIXME: This code has hardwired assumptions about the split
	 *	  between space bits and offset bits. This will change
	 *	  when we allow alternate page sizes.
	 */

	/* adjust isr/ior. */
	extrd,u		%r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
	depd		%r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
	depdi		0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)


skip_save_ior:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations. Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_flush_20w

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20w:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,52,pte
	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_check_alias_11:

	/* Check to see if fault is in the temporary alias region */

	cmpib,<>,n      0,spc,dtlb_fault /* forward */
	ldil            L%(TMPALIAS_MAP_START),t0
	copy            va,t1
	depwi           0,31,23,t1
	cmpb,<>,n       t0,t1,dtlb_fault /* forward */
	ldi             (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
	depw,z          prot,8,7,prot

	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */

	extrw,u,=       va,9,1,r0
	or,tr           %r23,%r0,pte    /* If "from" use "from" page */
	or              %r26,%r0,pte    /* else "to", use "to" page  */

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_11

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot


	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

nadtlb_check_flush_11:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	zdepi           7,7,3,prot
	depi            1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlba */

	depi            0,31,12,pte
	extru           pte,24,25,pte

	mfsp            %sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp            spc,%sr1

	idtlba          pte,(%sr1,va)
	idtlbp          prot,(%sr1,va)

	mtsp            t0, %sr1	/* Restore sr1 */

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	idtlbt          pte,prot

	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault
	
	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_flush_20

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0
	
        idtlbt          pte,prot

	rfir
	nop

nadtlb_check_flush_20:
	bb,>=,n          pte,_PAGE_FLUSH_BIT,nadtlb_emulate

	/* Insert a "flush only" translation */

	depdi,z         7,7,3,prot
	depdi           1,10,1,prot

	/* Get rid of prot bits and convert to page addr for idtlbt */

	depdi		0,63,12,pte
	extrd,u         pte,56,32,pte
	idtlbt          pte,prot

	rfir
	nop
#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
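
	/* In effect (illustrative pseudocode only): for a flush insn that
	 * uses the base-modify completer, decode the iir, perform
	 * base += index via get_register/set_register, and set PSW_N so
	 * the insn is nullified on rfir; probe insns instead have zero
	 * written to their target register, as handled further below. */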

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
	 * When there is no translation for the probe address then we
	 * must nullify the insn and return zero in the target register.
	 * This will indicate to the calling code that it does not have
	 * write/read privileges to this address.
	 *
	 * This should technically work for prober and probew in PA 1.1,
	 * and also probe,r and probe,w in PA 2.0
	 *
	 * WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
	 * THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
	 */
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	CMPIB=,n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	iitlbt          pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t0, %sr1	/* Restore sr1 */

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	update_ptep	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t0

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20w
	load32		PA(pa_dbit_lock),t0

dbit_spin_20w:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20w
	nop

dbit_nolock_20w:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot
		
	idtlbt          pte,prot
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20w
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20w:
#endif

	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_11
	load32		PA(pa_dbit_lock),t0

dbit_spin_11:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_11
	nop

dbit_nolock_11:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */
#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_11
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_11:
#endif

	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nolock_20
	load32		PA(pa_dbit_lock),t0

dbit_spin_20:
	LDCW		0(t0),t1
	cmpib,=         0,t1,dbit_spin_20
	nop

dbit_nolock_20:
#endif
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot

	f_extend	pte,t1
	
        idtlbt          pte,prot

#ifdef CONFIG_SMP
	CMPIB=,n        0,spc,dbit_nounlock_20
	ldi             1,t1
	stw             t1,0(t0)

dbit_nounlock_20:
#endif

	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             6,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:
	 *
	 * %r1		   clobbered by system call macro in userspace
	 * %r2		   saved in PT_REGS by gateway page
	 * %r3  - %r18	   preserved by C code (saved by signal code)
	 * %r19 - %r20	   saved in PT_REGS by gateway page
	 * %r21 - %r22	   non-standard syscall args
	 *		   stored in kernel stack by gateway page
	 * %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	 * %r27 - %r30	   saved in PT_REGS by gateway page
	 * %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)
	 *
	 * %fr0  - %fr3	   status/exception, not preserved
	 * %fr4  - %fr7	   arguments
	 * %fr8  - %fr11   not preserved by C code
	 * %fr12 - %fr21   preserved by C code
	 * %fr22 - %fr31   not preserved by C code
	 */
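
	/* For orientation (an added note describing the usual userspace
	 * convention, not something this file defines): a syscall stub
	 * typically places the syscall number in %r20 and arguments in
	 * %r26..%r23, then branches through the gateway page, e.g.
	 *
	 *	ble	0x100(%sr2, %r0)
	 *	ldi	__NR_getpid, %r20
	 *
	 * with the return value arriving in %r28. */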

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.export sys_fork_wrapper
	.export child_return
sys_fork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* These are call-clobbered registers and therefore
	   also syscall-clobbered (we hope). */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	LDREG	PT_GR30(%r1),%r25
	copy	%r1,%r24
	BL	sys_clone,%r2
	ldi	SIGCHLD,%r26

	LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
	ldo	-FRAME_SIZE(%r30),%r30		/* get the stackframe */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1

	/* strace expects syscall # to be preserved in r20 */
	ldi	__NR_fork,%r20
	bv %r0(%r2)
	STREG	%r20,PT_GR20(%r1)

	/* Set the return value for the child */
child_return:
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR19(%r1),%r2
	b	wrapper_exit
	copy	%r0,%r28


	.export sys_clone_wrapper
sys_clone_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	/* WARNING - Clobbers r19 and r21, userspace must save these! */
	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)
	BL	sys_clone,%r2
	copy	%r1,%r24

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2

	.export sys_vfork_wrapper
sys_vfork_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_save %r1
	mfctl	%cr27, %r3
	STREG	%r3, PT_CR27(%r1)

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	STREG	%r2,PT_GR19(%r1)	/* save for child */
	STREG	%r30,PT_GR21(%r1)

	BL	sys_vfork,%r2
	copy	%r1,%r26

	b	wrapper_exit
	LDREG	-RP_OFFSET-FRAME_SIZE(%r30),%r2


	.macro  execve_wrapper execve
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	/*
	 * Do we need to save/restore r3-r18 here?
	 * I don't think so. why would the new thread need the
	 * old thread's registers?
	 */

	/* %arg0 - %arg3 are already saved for us. */

	STREG	%r2,-RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30),%r30
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif
	BL	\execve,%r2
	copy	%r1,%arg0

	ldo	-FRAME_SIZE(%r30),%r30
	LDREG	-RP_OFFSET(%r30),%r2

	/* If exec succeeded we need to load the args */

	ldo	-1024(%r0),%r1
	cmpb,>>= %r28,%r1,error_\execve
	copy	%r2,%r19

error_\execve:
	bv	%r0(%r19)
	nop
	.endm

	.export sys_execve_wrapper
	.import sys_execve

sys_execve_wrapper:
	execve_wrapper sys_execve

#ifdef CONFIG_64BIT
	.export sys32_execve_wrapper
	.import sys32_execve

sys32_execve_wrapper:
	execve_wrapper sys32_execve
#endif

	.export sys_rt_sigreturn_wrapper
sys_rt_sigreturn_wrapper:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */

	.export sys_sigaltstack_wrapper
sys_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	do_sigaltstack,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop

#ifdef CONFIG_64BIT
	.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
	/* Get the user stack pointer */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
	LDREG	TASK_PT_GR30(%r24),%r24
	STREG	%r2, -RP_OFFSET(%r30)
	ldo	FRAME_SIZE(%r30), %r30
	BL	do_sigaltstack32,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	nop
#endif

	.export syscall_exit
syscall_exit:

	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX

/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
	LDREG	TASK_PERSONALITY(%r1),%r19

	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
	ldo	-PER_HPUX(%r19), %r19
	CMPIB<>,n 0,%r19,1f

	/* Save other hpux returns if personality is PER_HPUX */
	STREG   %r22,TASK_PT_GR22(%r1)
	STREG   %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_bh:

	/* Check for software interrupts */

	.import irq_stat,data

	load32	irq_stat,%r19

#ifdef CONFIG_SMP
	/* sched.h: int processor */
	/* %r26 is used as scratch register to index into irq_stat[] */
	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */

	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
	SHLREG	%r26,L1_CACHE_SHIFT,%r20
	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
#endif /* CONFIG_SMP */

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall. We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

2118syscall_restore:
2119 /* Are we being ptraced? */
2120 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2121
2122 LDREG TASK_PTRACE(%r1), %r19
2123 bb,< %r19,31,syscall_restore_rfi
2124 nop
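	/* Bit 31 in PA numbering is the least significant bit of
	 * task->ptrace, i.e. PT_PTRACED.  A traced task must leave
	 * via the RFI path so the PSW trace bits can be set. */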

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use an rsm/ssm pair to make this operation atomic */
	rsm	PSW_SM_I, %r0
	LDREG	TASK_PT_GR30(%r1),%r30		/* restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0
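	/* Interrupts are disabled across the sp/sr7 switch: an
	 * interruption taken between the two instructions could see
	 * a user stack pointer paired with a kernel space id. */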

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */
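	/* The low two bits of a PA-RISC address encode the privilege
	 * level; 3 is the least privileged (user) level. */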

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
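	/* Roughly, in pseudo-C (a sketch of the three instructions
	 * above):
	 *
	 *	w = sp & 1;		   (W bit stashed in sp's LSB)
	 *	if (w == 0)
	 *		psw &= ~PSW_W;	   (narrow process: clear W)
	 *	sp ^= w;		   (strip the stashed bit)
	 */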
#endif
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/* for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
	 * set in include/linux/ptrace.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1 } */
	extru,=	%r19,PA_SINGLESTEP_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1 } */
	extru,=	%r19,PA_BLOCKSTEP_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)
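	/* With the recovery counter (%cr0) preloaded to -1 above, the
	 * R bit traps after a single instruction and the T bit traps
	 * on taken branches -- together these implement ptrace
	 * single-stepping and branch-stepping. */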

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

pt_regs_ok:
	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	copy	%r25,%r16
	b	intr_restore
	nop

	.import schedule,code
syscall_do_resched:
	BL	schedule,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
	b	syscall_check_bh	/* if resched, we start over again */
	nop

	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */

get_register:
	blr	%r8,%r0
	nop
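	/* blr computes its target as the instruction following the
	 * delay slot plus (%r8 << 3); with two instructions (8 bytes)
	 * per entry, the register number in %r8 indexes directly into
	 * the table below. */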
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1

	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */

set_register:
	blr	%r8,%r0
	nop
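	/* Even the r25 entry below is safe: bv computes its target
	 * from %r25 before the delay-slot copy overwrites it. */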
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31