* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
sched/tracing: Add a new tracepoint for sleeptime
sched: Disable scheduler warnings during oopses
sched: Fix cgroup movement of waking process
sched: Fix cgroup movement of newly created process
sched: Fix cgroup movement of forking process
sched: Remove cfs bandwidth period check in tg_set_cfs_period()
sched: Fix load-balance lock-breaking
sched: Replace all_pinned with a generic flags field
sched: Only queue remote wakeups when crossing cache boundaries
sched: Add missing rcu_dereference() around ->real_parent usage
[S390] fix cputime overflow in uptime_proc_show
[S390] cputime: add sparse checking and cleanup
sched: Mark parent and real_parent as __rcu
sched, nohz: Fix missing RCU read lock
sched, nohz: Set the NOHZ_BALANCE_KICK flag for idle load balancer
sched, nohz: Fix the idle cpu check in nohz_idle_balance
sched: Use jump_labels for sched_feat
sched/accounting: Fix parameter passing in task_group_account_field
sched/accounting: Fix user/system tick double accounting
sched/accounting: Re-use scheduler statistics for the root cgroup
...
Fix up conflicts in
- arch/ia64/include/asm/cputime.h, include/asm-generic/cputime.h
usecs_to_cputime64() vs the sparse cleanups
- kernel/sched/fair.c, kernel/time/tick-sched.c
scheduler changes in multiple branches
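
The usecs_to_cputime64() conflict is between the new helper used by /proc/stat's NO_HZ idle/iowait accounting (see the fs/proc/stat.c hunk further down) and the sparse cleanup that types cputime_t as a __nocast u64, so every conversion now has to apply or strip the type with an explicit __force cast, as the resolved hunks below do. A minimal standalone sketch of that pattern follows; CPUTIME_PER_USEC and the fallback macro definitions here are illustrative, not from the tree:

/* Sketch of the __nocast/__force pattern used by the cputime cleanup.
 * Outside of a sparse run the annotations expand to nothing, mirroring
 * what linux/compiler.h does; CPUTIME_PER_USEC is a made-up constant. */
#ifdef __CHECKER__
# define __nocast	__attribute__((nocast))
# define __force	__attribute__((force))
#else
# define __nocast
# define __force
#endif

typedef unsigned long long u64;
typedef u64 __nocast cputime_t;

#define CPUTIME_PER_USEC 1000ULL	/* illustrative: 1 cputime unit = 1ns */

/* Arithmetic is done on plain u64 and the result is cast back, so sparse
 * flags any place that mixes cputime_t with untyped integers by accident. */
static inline cputime_t usecs_to_cputime(u64 usecs)
{
	return (__force cputime_t)(usecs * CPUTIME_PER_USEC);
}

static inline u64 cputime_to_usecs(cputime_t ct)
{
	return (__force u64)ct / CPUTIME_PER_USEC;
}
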
#include <linux/jiffies.h>
#include <asm/processor.h>
- typedef u64 cputime_t;
- typedef u64 cputime64_t;
+ typedef u64 __nocast cputime_t;
+ typedef u64 __nocast cputime64_t;
- #define cputime_zero ((cputime_t)0)
#define cputime_one_jiffy jiffies_to_cputime(1)
- #define cputime_max ((~((cputime_t)0) >> 1) - 1)
- #define cputime_add(__a, __b) ((__a) + (__b))
- #define cputime_sub(__a, __b) ((__a) - (__b))
- #define cputime_div(__a, __n) ((__a) / (__n))
- #define cputime_halve(__a) ((__a) >> 1)
- #define cputime_eq(__a, __b) ((__a) == (__b))
- #define cputime_gt(__a, __b) ((__a) > (__b))
- #define cputime_ge(__a, __b) ((__a) >= (__b))
- #define cputime_lt(__a, __b) ((__a) < (__b))
- #define cputime_le(__a, __b) ((__a) <= (__b))
-
- #define cputime64_zero ((cputime64_t)0)
- #define cputime64_add(__a, __b) ((__a) + (__b))
- #define cputime64_sub(__a, __b) ((__a) - (__b))
- #define cputime_to_cputime64(__ct) (__ct)
/*
* Convert cputime <-> jiffies (HZ)
*/
- #define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
- #define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
- #define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / HZ))
- #define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ))
+ #define cputime_to_jiffies(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+ #define jiffies_to_cputime(__jif) \
+ (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+ #define cputime64_to_jiffies64(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+ #define jiffies64_to_cputime64(__jif) \
+ (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
/*
* Convert cputime <-> microseconds
*/
- #define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC)
- #define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC)
- #define usecs_to_cputime64(__usecs) usecs_to_cputime(__usecs)
+ #define cputime_to_usecs(__ct) \
+ ((__force u64)(__ct) / NSEC_PER_USEC)
+ #define usecs_to_cputime(__usecs) \
+ (__force cputime_t)((__usecs) * NSEC_PER_USEC)
++#define usecs_to_cputime64(__usecs) \
++ (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
/*
* Convert cputime <-> seconds
*/
- #define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC)
- #define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC)
+ #define cputime_to_secs(__ct) \
+ ((__force u64)(__ct) / NSEC_PER_SEC)
+ #define secs_to_cputime(__secs) \
+ (__force cputime_t)((__secs) * NSEC_PER_SEC)
/*
* Convert cputime <-> timespec (nsec)
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- cputime_t ret = val->tv_sec * NSEC_PER_SEC;
- return (ret + val->tv_nsec);
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
{
- val->tv_sec = ct / NSEC_PER_SEC;
- val->tv_nsec = ct % NSEC_PER_SEC;
+ val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+ val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
}
/*
*/
static inline cputime_t timeval_to_cputime(struct timeval *val)
{
- cputime_t ret = val->tv_sec * NSEC_PER_SEC;
- return (ret + val->tv_usec * NSEC_PER_USEC);
+ u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
{
- val->tv_sec = ct / NSEC_PER_SEC;
- val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+ val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+ val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
}
/*
* Convert cputime <-> clock (USER_HZ)
*/
- #define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ))
- #define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ))
+ #define cputime_to_clock_t(__ct) \
+ ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+ #define clock_t_to_cputime(__x) \
+ (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
/*
* Convert cputime64 to clock.
*/
- #define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct)
+ #define cputime64_to_clock_t(__ct) \
+ cputime_to_clock_t((__force cputime_t)__ct)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
#include <asm/time.h>
#include <asm/param.h>
- typedef u64 cputime_t;
- typedef u64 cputime64_t;
-
- #define cputime_zero ((cputime_t)0)
- #define cputime_max ((~((cputime_t)0) >> 1) - 1)
- #define cputime_add(__a, __b) ((__a) + (__b))
- #define cputime_sub(__a, __b) ((__a) - (__b))
- #define cputime_div(__a, __n) ((__a) / (__n))
- #define cputime_halve(__a) ((__a) >> 1)
- #define cputime_eq(__a, __b) ((__a) == (__b))
- #define cputime_gt(__a, __b) ((__a) > (__b))
- #define cputime_ge(__a, __b) ((__a) >= (__b))
- #define cputime_lt(__a, __b) ((__a) < (__b))
- #define cputime_le(__a, __b) ((__a) <= (__b))
-
- #define cputime64_zero ((cputime64_t)0)
- #define cputime64_add(__a, __b) ((__a) + (__b))
- #define cputime64_sub(__a, __b) ((__a) - (__b))
- #define cputime_to_cputime64(__ct) (__ct)
+ typedef u64 __nocast cputime_t;
+ typedef u64 __nocast cputime64_t;
#ifdef __KERNEL__
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/* Estimate the scaled cputime by scaling the real cputime based on
{
if (cpu_has_feature(CPU_FTR_SPURR) &&
__get_cpu_var(cputime_last_delta))
- return ct * __get_cpu_var(cputime_scaled_last_delta) /
- __get_cpu_var(cputime_last_delta);
+ return (__force u64) ct *
+ __get_cpu_var(cputime_scaled_last_delta) /
+ __get_cpu_var(cputime_last_delta);
return ct;
}
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
static inline void setup_cputime_one_jiffy(void)
static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
{
- cputime_t ct;
+ u64 ct;
u64 sec;
/* have to be a little careful about overflow */
do_div(ct, HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime64_t) ct;
}
static inline u64 cputime64_to_jiffies64(const cputime_t ct)
{
- return mulhdu(ct, __cputime_jiffies_factor);
+ return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
/*
static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+ return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC;
}
static inline cputime_t usecs_to_cputime(const unsigned long us)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ return (__force cputime_t) ct;
}
+#define usecs_to_cputime64(us) usecs_to_cputime(us)
+
/*
* Convert cputime <-> seconds
*/
static inline unsigned long cputime_to_secs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_sec_factor);
+ return mulhdu((__force u64) ct, __cputime_sec_factor);
}
static inline cputime_t secs_to_cputime(const unsigned long sec)
{
- return (cputime_t) sec * tb_ticks_per_sec;
+ return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
}
/*
*/
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
static inline cputime_t timespec_to_cputime(const struct timespec *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_nsec * tb_ticks_per_sec;
do_div(ct, 1000000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
*/
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
{
- u64 x = ct;
+ u64 x = (__force u64) ct;
unsigned int frac;
frac = do_div(x, tb_ticks_per_sec);
static inline cputime_t timeval_to_cputime(const struct timeval *p)
{
- cputime_t ct;
+ u64 ct;
ct = (u64) p->tv_usec * tb_ticks_per_sec;
do_div(ct, 1000000);
- return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+ return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
}
/*
static inline unsigned long cputime_to_clock_t(const cputime_t ct)
{
- return mulhdu(ct, __cputime_clockt_factor);
+ return mulhdu((__force u64) ct, __cputime_clockt_factor);
}
static inline cputime_t clock_t_to_cputime(const unsigned long clk)
{
- cputime_t ct;
+ u64 ct;
unsigned long sec;
/* have to be a little careful about overflow */
do_div(ct, USER_HZ);
}
if (sec)
- ct += (cputime_t) sec * tb_ticks_per_sec;
- return ct;
+ ct += (u64) sec * tb_ticks_per_sec;
+ return (__force cputime_t) ct;
}
#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
- typedef unsigned long long cputime_t;
- typedef unsigned long long cputime64_t;
+ typedef unsigned long long __nocast cputime_t;
+ typedef unsigned long long __nocast cputime64_t;
- #ifndef __s390x__
-
- static inline unsigned int
- __div(unsigned long long n, unsigned int base)
+ static inline unsigned long __div(unsigned long long n, unsigned long base)
{
+ #ifndef __s390x__
register_pair rp;
rp.pair = n >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
return rp.subreg.odd;
+ #else /* __s390x__ */
+ return n / base;
+ #endif /* __s390x__ */
}
- #else /* __s390x__ */
+ #define cputime_one_jiffy jiffies_to_cputime(1)
- static inline unsigned int
- __div(unsigned long long n, unsigned int base)
+ /*
+ * Convert cputime to jiffies and back.
+ */
+ static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
{
- return n / base;
+ return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
}
- #endif /* __s390x__ */
+ static inline cputime_t jiffies_to_cputime(const unsigned int jif)
+ {
+ return (__force cputime_t)(jif * (4096000000ULL / HZ));
+ }
- #define cputime_zero (0ULL)
- #define cputime_one_jiffy jiffies_to_cputime(1)
- #define cputime_max ((~0UL >> 1) - 1)
- #define cputime_add(__a, __b) ((__a) + (__b))
- #define cputime_sub(__a, __b) ((__a) - (__b))
- #define cputime_div(__a, __n) ({ \
- unsigned long long __div = (__a); \
- do_div(__div,__n); \
- __div; \
- })
- #define cputime_halve(__a) ((__a) >> 1)
- #define cputime_eq(__a, __b) ((__a) == (__b))
- #define cputime_gt(__a, __b) ((__a) > (__b))
- #define cputime_ge(__a, __b) ((__a) >= (__b))
- #define cputime_lt(__a, __b) ((__a) < (__b))
- #define cputime_le(__a, __b) ((__a) <= (__b))
- #define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
- #define cputime_to_scaled(__ct) (__ct)
- #define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
-
- #define cputime64_zero (0ULL)
- #define cputime64_add(__a, __b) ((__a) + (__b))
- #define cputime_to_cputime64(__ct) (__ct)
-
- static inline u64
- cputime64_to_jiffies64(cputime64_t cputime)
- {
- do_div(cputime, 4096000000ULL / HZ);
- return cputime;
+ static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+ {
+ unsigned long long jif = (__force unsigned long long) cputime;
+ do_div(jif, 4096000000ULL / HZ);
+ return jif;
+ }
+
+ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+ {
+ return (__force cputime64_t)(jif * (4096000000ULL / HZ));
}
/*
* Convert cputime to microseconds and back.
*/
- static inline unsigned int
- cputime_to_usecs(const cputime_t cputime)
+ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
- return cputime_div(cputime, 4096);
+ return (__force unsigned long long) cputime >> 12;
}
- static inline cputime_t
- usecs_to_cputime(const unsigned int m)
+ static inline cputime_t usecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 4096;
+ return (__force cputime_t)(m * 4096ULL);
}
+#define usecs_to_cputime64(m) usecs_to_cputime(m)
+
/*
* Convert cputime to milliseconds and back.
*/
- static inline unsigned int
- cputime_to_secs(const cputime_t cputime)
+ static inline unsigned int cputime_to_secs(const cputime_t cputime)
{
- return __div(cputime, 2048000000) >> 1;
+ return __div((__force unsigned long long) cputime, 2048000000) >> 1;
}
- static inline cputime_t
- secs_to_cputime(const unsigned int s)
+ static inline cputime_t secs_to_cputime(const unsigned int s)
{
- return (cputime_t) s * 4096000000ULL;
+ return (__force cputime_t)(s * 4096000000ULL);
}
/*
* Convert cputime to timespec and back.
*/
- static inline cputime_t
- timespec_to_cputime(const struct timespec *value)
+ static inline cputime_t timespec_to_cputime(const struct timespec *value)
{
- return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
+ unsigned long long ret = value->tv_sec * 4096000000ULL;
+ return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
}
- static inline void
- cputime_to_timespec(const cputime_t cputime, struct timespec *value)
+ static inline void cputime_to_timespec(const cputime_t cputime,
+ struct timespec *value)
{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
register_pair rp;
- rp.pair = cputime >> 1;
+ rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
- value->tv_sec = cputime / 4096000000ULL;
+ value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
+ value->tv_sec = __cputime / 4096000000ULL;
#endif
}
* Since cputime and timeval have the same resolution (microseconds)
* this is easy.
*/
- static inline cputime_t
- timeval_to_cputime(const struct timeval *value)
+ static inline cputime_t timeval_to_cputime(const struct timeval *value)
{
- return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
+ unsigned long long ret = value->tv_sec * 4096000000ULL;
+ return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
}
- static inline void
- cputime_to_timeval(const cputime_t cputime, struct timeval *value)
+ static inline void cputime_to_timeval(const cputime_t cputime,
+ struct timeval *value)
{
+ unsigned long long __cputime = (__force unsigned long long) cputime;
#ifndef __s390x__
register_pair rp;
- rp.pair = cputime >> 1;
+ rp.pair = __cputime >> 1;
asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = (cputime % 4096000000ULL) / 4096;
- value->tv_sec = cputime / 4096000000ULL;
+ value->tv_usec = (__cputime % 4096000000ULL) / 4096;
+ value->tv_sec = __cputime / 4096000000ULL;
#endif
}
/*
* Convert cputime to clock and back.
*/
- static inline clock_t
- cputime_to_clock_t(cputime_t cputime)
+ static inline clock_t cputime_to_clock_t(cputime_t cputime)
{
- return cputime_div(cputime, 4096000000ULL / USER_HZ);
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, 4096000000ULL / USER_HZ);
+ return clock;
}
- static inline cputime_t
- clock_t_to_cputime(unsigned long x)
+ static inline cputime_t clock_t_to_cputime(unsigned long x)
{
- return (cputime_t) x * (4096000000ULL / USER_HZ);
+ return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
}
/*
* Convert cputime64 to clock.
*/
- static inline clock_t
- cputime64_to_clock_t(cputime64_t cputime)
+ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
{
- return cputime_div(cputime, 4096000000ULL / USER_HZ);
+ unsigned long long clock = (__force unsigned long long) cputime;
+ do_div(clock, 4096000000ULL / USER_HZ);
+ return clock;
}
struct s390_idle_data {
#define arch_idle_time(cpu) 0
#endif
- static cputime64_t get_idle_time(int cpu)
+ static u64 get_idle_time(int cpu)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
- cputime64_t idle;
+ u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
if (idle_time == -1ULL) {
/* !NO_HZ so we can rely on cpustat.idle */
- idle = kstat_cpu(cpu).cpustat.idle;
- idle = cputime64_add(idle, arch_idle_time(cpu));
+ idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+ idle += arch_idle_time(cpu);
} else
- idle = nsecs_to_jiffies64(1000 * idle_time);
+ idle = usecs_to_cputime64(idle_time);
return idle;
}
- static cputime64_t get_iowait_time(int cpu)
+ static u64 get_iowait_time(int cpu)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
- cputime64_t iowait;
+ u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);
if (iowait_time == -1ULL)
/* !NO_HZ so we can rely on cpustat.iowait */
- iowait = kstat_cpu(cpu).cpustat.iowait;
+ iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
- iowait = nsecs_to_jiffies64(1000 * iowait_time);
+ iowait = usecs_to_cputime64(iowait_time);
return iowait;
}
{
int i, j;
unsigned long jif;
- cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
- cputime64_t guest, guest_nice;
+ u64 user, nice, system, idle, iowait, irq, softirq, steal;
+ u64 guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
struct timespec boottime;
user = nice = system = idle = iowait =
- irq = softirq = steal = cputime64_zero;
- guest = guest_nice = cputime64_zero;
+ irq = softirq = steal = 0;
+ guest = guest_nice = 0;
getboottime(&boottime);
jif = boottime.tv_sec;
for_each_possible_cpu(i) {
- user = cputime64_add(user, kstat_cpu(i).cpustat.user);
- nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
- system = cputime64_add(system, kstat_cpu(i).cpustat.system);
- idle = cputime64_add(idle, get_idle_time(i));
- iowait = cputime64_add(iowait, get_iowait_time(i));
- irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
- softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
- steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
- guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
- guest_nice = cputime64_add(guest_nice,
- kstat_cpu(i).cpustat.guest_nice);
- sum += kstat_cpu_irqs_sum(i);
- sum += arch_irq_stat_cpu(i);
+ user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
+ nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+ system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+ idle += get_idle_time(i);
+ iowait += get_iowait_time(i);
+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
for (j = 0; j < NR_SOFTIRQS; j++) {
unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
(unsigned long long)cputime64_to_clock_t(guest_nice));
for_each_online_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
- user = kstat_cpu(i).cpustat.user;
- nice = kstat_cpu(i).cpustat.nice;
- system = kstat_cpu(i).cpustat.system;
+ user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
+ nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+ system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
idle = get_idle_time(i);
iowait = get_iowait_time(i);
- irq = kstat_cpu(i).cpustat.irq;
- softirq = kstat_cpu(i).cpustat.softirq;
- steal = kstat_cpu(i).cpustat.steal;
- guest = kstat_cpu(i).cpustat.guest;
- guest_nice = kstat_cpu(i).cpustat.guest_nice;
+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
seq_printf(p,
"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
"%llu\n",
#include <linux/time.h>
#include <linux/jiffies.h>
- typedef unsigned long cputime_t;
+ typedef unsigned long __nocast cputime_t;
- #define cputime_zero (0UL)
#define cputime_one_jiffy jiffies_to_cputime(1)
- #define cputime_max ((~0UL >> 1) - 1)
- #define cputime_add(__a, __b) ((__a) + (__b))
- #define cputime_sub(__a, __b) ((__a) - (__b))
- #define cputime_div(__a, __n) ((__a) / (__n))
- #define cputime_halve(__a) ((__a) >> 1)
- #define cputime_eq(__a, __b) ((__a) == (__b))
- #define cputime_gt(__a, __b) ((__a) > (__b))
- #define cputime_ge(__a, __b) ((__a) >= (__b))
- #define cputime_lt(__a, __b) ((__a) < (__b))
- #define cputime_le(__a, __b) ((__a) <= (__b))
- #define cputime_to_jiffies(__ct) (__ct)
+ #define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
#define cputime_to_scaled(__ct) (__ct)
- #define jiffies_to_cputime(__hz) (__hz)
+ #define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
- typedef u64 cputime64_t;
+ typedef u64 __nocast cputime64_t;
- #define cputime64_zero (0ULL)
- #define cputime64_add(__a, __b) ((__a) + (__b))
- #define cputime64_sub(__a, __b) ((__a) - (__b))
- #define cputime64_to_jiffies64(__ct) (__ct)
- #define jiffies64_to_cputime64(__jif) (__jif)
- #define cputime_to_cputime64(__ct) ((u64) __ct)
- #define cputime64_gt(__a, __b) ((__a) > (__b))
+ #define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
+ #define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
- #define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct)
+ #define nsecs_to_cputime64(__ct) \
+ jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
/*
* Convert cputime to microseconds and back.
*/
- #define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
- #define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
- #define usecs_to_cputime64(__msecs) nsecs_to_jiffies64((__msecs) * 1000)
+ #define cputime_to_usecs(__ct) \
- jiffies_to_usecs(cputime_to_jiffies(__ct));
-#define usecs_to_cputime(__msecs) \
- jiffies_to_cputime(usecs_to_jiffies(__msecs));
++ jiffies_to_usecs(cputime_to_jiffies(__ct))
++#define usecs_to_cputime(__usec) \
++ jiffies_to_cputime(usecs_to_jiffies(__usec))
++#define usecs_to_cputime64(__usec) \
++ jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
/*
* Convert cputime to seconds and back.
*/
- #define cputime_to_secs(jif) ((jif) / HZ)
- #define secs_to_cputime(sec) ((sec) * HZ)
+ #define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
+ #define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
/*
* Convert cputime to timespec and back.
*/
- #define timespec_to_cputime(__val) timespec_to_jiffies(__val)
- #define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+ #define timespec_to_cputime(__val) \
+ jiffies_to_cputime(timespec_to_jiffies(__val))
+ #define cputime_to_timespec(__ct,__val) \
+ jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to timeval and back.
*/
- #define timeval_to_cputime(__val) timeval_to_jiffies(__val)
- #define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+ #define timeval_to_cputime(__val) \
+ jiffies_to_cputime(timeval_to_jiffies(__val))
+ #define cputime_to_timeval(__ct,__val) \
+ jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
/*
* Convert cputime to clock and back.
*/
- #define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
- #define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+ #define cputime_to_clock_t(__ct) \
+ jiffies_to_clock_t(cputime_to_jiffies(__ct))
+ #define clock_t_to_cputime(__x) \
+ jiffies_to_cputime(clock_t_to_jiffies(__x))
/*
* Convert cputime64 to clock.
*/
- #define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+ #define cputime64_to_clock_t(__ct) \
+ jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
+ extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
+ static inline void set_cpu_sd_state_idle(void) { }
#endif
/*
#define INIT_CPUTIME \
(struct task_cputime) { \
- .utime = cputime_zero, \
- .stime = cputime_zero, \
+ .utime = 0, \
+ .stime = 0, \
.sum_exec_runtime = 0, \
}
* single CPU.
*/
unsigned int power, power_orig;
+ /*
+ * Number of busy cpus in this group.
+ */
+ atomic_t nr_busy_cpus;
};
struct sched_group {
return to_cpumask(sg->cpumask);
}
+ /**
+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
+ * @group: The group whose first cpu is to be returned.
+ */
+ static inline unsigned int group_first_cpu(struct sched_group *group)
+ {
+ return cpumask_first(sched_group_cpus(group));
+ }
+
struct sched_domain_attr {
int relax_domain_level;
};
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
- struct task_struct *real_parent; /* real parent process */
- struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+ struct task_struct __rcu *real_parent; /* real parent process */
+ struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
/*
* children/sibling forms the list of my natural children
*/
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @p: the task in question.
+ */
+static inline bool is_idle_task(struct task_struct *p)
+{
+ return p->pid == 0;
+}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
write_lock_irq(&tasklist_lock);
for_each_process(p) {
if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
- (!cputime_eq(p->utime, cputime_zero) ||
- !cputime_eq(p->stime, cputime_zero)))
+ (p->utime || p->stime))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
"(state = %ld, flags = %x)\n",
p->comm, task_pid_nr(p), cpu,
cpu_maps_update_done();
return err;
}
+EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+ sig->utime += tsk->utime;
+ sig->stime += tsk->stime;
+ sig->gtime += tsk->gtime;
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
- psig->cutime =
- cputime_add(psig->cutime,
- cputime_add(tgutime,
- sig->cutime));
- psig->cstime =
- cputime_add(psig->cstime,
- cputime_add(tgstime,
- sig->cstime));
- psig->cgtime =
- cputime_add(psig->cgtime,
- cputime_add(p->gtime,
- cputime_add(sig->gtime,
- sig->cgtime)));
+ psig->cutime += tgutime + sig->cutime;
+ psig->cstime += tgstime + sig->cstime;
+ psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
psig->cmin_flt +=
p->min_flt + sig->min_flt + sig->cmin_flt;
psig->cmaj_flt +=
}
/* dead body doesn't have much to contribute */
- if (p->exit_state == EXIT_DEAD)
+ if (unlikely(p->exit_state == EXIT_DEAD)) {
+ /*
+ * But do not ignore this task until the tracer does
+ * wait_task_zombie()->do_notify_parent().
+ */
+ if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+ wo->notask_error = 0;
return 0;
+ }
/* slay zombie? */
if (p->exit_state == EXIT_ZOMBIE) {
info.si_uid = __task_cred(tsk)->uid;
rcu_read_unlock();
- info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
- tsk->signal->utime));
- info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
- tsk->signal->stime));
+ info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
+ info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
*/
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
- else
- WARN_ON_ONCE(!current->ptrace);
sig->group_stop_count = 0;
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
- unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
- struct tick_sched *ts;
+ unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
int cpu;
- local_irq_save(flags);
-
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
- /*
- * Update the idle state in the scheduler domain hierarchy
- * when tick_nohz_stop_sched_tick() is called from the idle loop.
- * State will be updated to busy during the first busy tick after
- * exiting idle.
- */
- if (inidle)
- set_cpu_sd_state_idle();
-
- /*
- * Call to tick_nohz_start_idle stops the last_update_time from being
- * updated. Thus, it must not be called in the event we are called from
- * irq_exit() with the prior state different than idle.
- */
- if (!inidle && !ts->inidle)
- goto end;
-
- /*
- * Set ts->inidle unconditionally. Even if the system did not
- * switch to NOHZ mode the cpu frequency governers rely on the
- * update of the idle time accounting in tick_nohz_start_idle().
- */
- ts->inidle = 1;
-
now = tick_nohz_start_idle(cpu, ts);
/*
}
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
- goto end;
+ return;
if (need_resched())
- goto end;
+ return;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
static int ratelimit;
(unsigned int) local_softirq_pending());
ratelimit++;
}
- goto end;
+ return;
}
ts->idle_calls++;
ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
ts->idle_jiffies = last_jiffies;
- rcu_enter_nohz();
}
ts->idle_sleeps++;
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
- local_irq_restore(flags);
+}
+
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ * to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+ struct tick_sched *ts;
+
+ WARN_ON_ONCE(irqs_disabled());
+
++ /*
++ * Update the idle state in the scheduler domain hierarchy
++ * when tick_nohz_stop_sched_tick() is called from the idle loop.
++ * State will be updated to busy during the first busy tick after
++ * exiting idle.
++ */
++ set_cpu_sd_state_idle();
++
+ local_irq_disable();
+
+ ts = &__get_cpu_var(tick_cpu_sched);
+ /*
+ * Set ts->inidle unconditionally. Even if the system did not
+ * switch to NOHZ mode the cpu frequency governors rely on the
+ * update of the idle time accounting in tick_nohz_start_idle().
+ */
+ ts->inidle = 1;
+ tick_nohz_stop_sched_tick(ts);
+
+ local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+ struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+ if (!ts->inidle)
+ return;
+
+ tick_nohz_stop_sched_tick(ts);
}
/**
}
/**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
*
* Restart the idle tick when the CPU is woken up from idle
+ * This also exits the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
*/
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now;
local_irq_disable();
+
if (ts->idle_active || (ts->inidle && ts->tick_stopped))
now = ktime_get();
ts->inidle = 0;
- rcu_exit_nohz();
-
/* Update jiffies first */
select_nohz_load_balancer(0);
tick_do_update_jiffies64(now);