sh: comment tidying for sh64->sh migration.
/*
 * arch/sh/lib64/udelay.c
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <asm/param.h>

/*
 * Use only for very small delays (< 1 msec).
 *
 * The active part of our cycle counter is only 32-bits wide, and
 * we're treating the difference between two marks as signed. On
 * a 1GHz box, that's about 2 seconds.
 */

void __delay(int loops)
{
	long long dummy;
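	/*
	 * Software spin loop: save the caller's tr0 target register in
	 * 'dummy', retarget tr0 at the decrement instruction below, and
	 * branch back to it until 'loops' reaches zero (r63 always reads
	 * as zero on SH-5), then restore the original tr0 value.
	 */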
	__asm__ __volatile__("gettr tr0, %1\n\t"
			     "pta $+4, tr0\n\t"
			     "addi %0, -1, %0\n\t"
			     "bne %0, r63, tr0\n\t"
			     "ptabs %1, tr0\n\t"
			     : "=r" (loops), "=r" (dummy)
			     : "0" (loops));
}

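/*
 * The delay is scaled into a loop count with 32.32 fixed point:
 * (HZ << 32) / 1000000 is the number of jiffies per microsecond,
 * scaled up by 2^32. Multiplying by loops_per_jiffy and the requested
 * microsecond count, then shifting right by 32, gives the number of
 * __delay() iterations. For example, with HZ = 100 and a hypothetical
 * lpj of 5,000,000, a 10 usec delay works out to roughly
 * (10 * 429496 * 5000000) >> 32 ~= 5000 loops, i.e. ~500 loops/usec.
 */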
void __udelay(unsigned long long usecs, unsigned long lpj)
{
	usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj;
	__delay((long long) usecs >> 32);
}

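/* Same scaling as __udelay(), but with a nanosecond divisor. */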
void __ndelay(unsigned long long nsecs, unsigned long lpj)
{
	nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj;
	__delay((long long) nsecs >> 32);
}

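/*
 * Public entry points: look up the loops_per_jiffy value calibrated for
 * the CPU we are currently running on and hand off to the scaled delay
 * helpers above.
 */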
void udelay(unsigned long usecs)
{
	__udelay(usecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
}

void ndelay(unsigned long nsecs)
{
	__ndelay(nsecs, cpu_data[raw_smp_processor_id()].loops_per_jiffy);
}