Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c7ea688faff4dd0000e0b9ad11e6c9110b1a98e6..cdf51a2adc2617449590c504745a5d4ebcd3de21 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -935,7 +935,7 @@ static int irqtime_account_hi_update(void)
 
        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
-       if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ]))
+       if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
@@ -950,7 +950,7 @@ static int irqtime_account_si_update(void)
 
        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
-       if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ]))
+       if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
@@ -2636,14 +2636,14 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
        int index;
 
        /* Add user time to process. */
-       p->utime = cputime_add(p->utime, cputime);
-       p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+       p->utime += cputime;
+       p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
 
        index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
        /* Add user time to cpustat. */
-       task_group_account_field(p, index, cputime);
+       task_group_account_field(p, index, (__force u64) cputime);
 
        /* Account for user time used */
        acct_update_integrals(p);
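
The account_user_time() hunk above shows the pattern applied throughout this file: the cputime_add()/cputime_to_cputime64() helpers give way to plain operators, with a (__force u64) cast wherever a cputime_t value is folded into the plain-u64 cpustat array. Below is a minimal stand-alone sketch of why the cast is needed, assuming cputime_t carries sparse's __nocast annotation as in the kernel this series targets; the demo_* names are invented for illustration and are not kernel code.

#include <stdint.h>

/* Stand-ins for the kernel's sparse annotations; empty outside sparse. */
#define __nocast
#define __force

typedef uint64_t u64;
typedef unsigned long __nocast cputime_t;	/* shape of the real typedef */

struct demo_task {
	cputime_t utime;			/* same __nocast type */
};

static u64 demo_cpustat[1];			/* plain u64, like cpustat[] */

static void demo_account_user_time(struct demo_task *p, cputime_t cputime)
{
	p->utime += cputime;			/* cputime_t += cputime_t: no cast needed */
	demo_cpustat[0] += (__force u64) cputime; /* cputime_t -> u64 crossing needs __force */
}

Within one __nocast type, plain arithmetic is fine; it is only the implicit conversion to a different scalar type that sparse flags, hence the explicit __force casts in the hunks above and below.
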
@@ -2658,24 +2658,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
 {
-       u64 tmp;
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
-       tmp = cputime_to_cputime64(cputime);
-
        /* Add guest time to process. */
-       p->utime = cputime_add(p->utime, cputime);
-       p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+       p->utime += cputime;
+       p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
-       p->gtime = cputime_add(p->gtime, cputime);
+       p->gtime += cputime;
 
        /* Add guest time to cpustat. */
        if (TASK_NICE(p) > 0) {
-               cpustat[CPUTIME_NICE] += tmp;
-               cpustat[CPUTIME_GUEST_NICE] += tmp;
+               cpustat[CPUTIME_NICE] += (__force u64) cputime;
+               cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
-               cpustat[CPUTIME_USER] += tmp;
-               cpustat[CPUTIME_GUEST] += tmp;
+               cpustat[CPUTIME_USER] += (__force u64) cputime;
+               cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
 }
 
@@ -2691,12 +2688,12 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
                        cputime_t cputime_scaled, int index)
 {
        /* Add system time to process. */
-       p->stime = cputime_add(p->stime, cputime);
-       p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+       p->stime += cputime;
+       p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);
 
        /* Add system time to cpustat. */
-       task_group_account_field(p, index, cputime);
+       task_group_account_field(p, index, (__force u64) cputime);
 
        /* Account for system time used */
        acct_update_integrals(p);
@@ -2736,9 +2733,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 void account_steal_time(cputime_t cputime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
-       u64 cputime64 = cputime_to_cputime64(cputime);
 
-       cpustat[CPUTIME_STEAL] += cputime64;
+       cpustat[CPUTIME_STEAL] += (__force u64) cputime;
 }
 
 /*
@@ -2748,13 +2744,12 @@ void account_steal_time(cputime_t cputime)
 void account_idle_time(cputime_t cputime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
-       u64 cputime64 = cputime_to_cputime64(cputime);
        struct rq *rq = this_rq();
 
        if (atomic_read(&rq->nr_iowait) > 0)
-               cpustat[CPUTIME_IOWAIT] += cputime64;
+               cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
-               cpustat[CPUTIME_IDLE] += cputime64;
+               cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
 static __always_inline bool steal_account_process_tick(void)
@@ -2804,16 +2799,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq)
 {
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-       u64 tmp = cputime_to_cputime64(cputime_one_jiffy);
        u64 *cpustat = kcpustat_this_cpu->cpustat;
 
        if (steal_account_process_tick())
                return;
 
        if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += tmp;
+               cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
        } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += tmp;
+               cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
@@ -2929,7 +2923,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-       cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+       cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
        /*
         * Use CFS's precise accounting:
@@ -2937,11 +2931,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
        if (total) {
-               u64 temp = rtime;
+               u64 temp = (__force u64) rtime;
 
-               temp *= utime;
-               do_div(temp, total);
-               utime = (cputime_t)temp;
+               temp *= (__force u64) utime;
+               do_div(temp, (__force u32) total);
+               utime = (__force cputime_t) temp;
        } else
                utime = rtime;
 
@@ -2949,7 +2943,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         * Compare with previous values, to keep monotonicity:
         */
        p->prev_utime = max(p->prev_utime, utime);
-       p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
+       p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
 
        *ut = p->prev_utime;
        *st = p->prev_stime;
@@ -2966,21 +2960,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 
        thread_group_cputime(p, &cputime);
 
-       total = cputime_add(cputime.utime, cputime.stime);
+       total = cputime.utime + cputime.stime;
        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
        if (total) {
-               u64 temp = rtime;
+               u64 temp = (__force u64) rtime;
 
-               temp *= cputime.utime;
-               do_div(temp, total);
-               utime = (cputime_t)temp;
+               temp *= (__force u64) cputime.utime;
+               do_div(temp, (__force u32) total);
+               utime = (__force cputime_t) temp;
        } else
                utime = rtime;
 
        sig->prev_utime = max(sig->prev_utime, utime);
-       sig->prev_stime = max(sig->prev_stime,
-                             cputime_sub(rtime, sig->prev_utime));
+       sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
 
        *ut = sig->prev_utime;
        *st = sig->prev_stime;
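
Taken together, the task_times() and thread_group_times() hunks reduce to a proportional split: the precisely measured CFS runtime (rtime) is divided between user and system time in the ratio of the tick-sampled utime/stime. A rough stand-alone sketch of that arithmetic, with the cputime_t casts and the monotonicity clamp against prev_utime/prev_stime left out, and with illustrative names:

#include <stdint.h>

/*
 * Split the precisely measured runtime (rtime) into user/system parts in
 * the same proportion as the tick-based samples, as task_times() does.
 */
static void demo_task_times(uint64_t utime, uint64_t stime, uint64_t rtime,
			    uint64_t *ut, uint64_t *st)
{
	uint64_t total = utime + stime;

	if (total)
		utime = rtime * utime / total;	/* the kernel uses do_div() here */
	else
		utime = rtime;			/* no samples yet: charge it all to user */

	*ut = utime;
	*st = rtime - utime;			/* the remainder counts as system time */
}
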