author    | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-15 14:56:09 +0100
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2011-12-15 14:56:19 +0100
commit    | 648616343cdbe904c585a6c12e323d3b3c72e46f (patch)
tree      | 514bce1b52663db4ab5662b637c764cf3c2ed1eb /drivers/cpufreq
parent    | 55b02d2f4445ad625213817a1736bf2884d32547 (diff)
[S390] cputime: add sparse checking and cleanup
Make cputime_t and cputime64_t nocast to enable sparse checking to
detect incorrect use of cputime. Drop the cputime macros for simple
scalar operations. The conversion macros are still needed.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
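For context, the sparse checking described above comes from annotating the cputime typedefs; the sketch below is illustrative only (the real definitions live in the cputime and compiler headers), assuming sparse's nocast attribute is applied roughly like this:

/* Illustrative sketch, not the actual kernel headers. */
#ifdef __CHECKER__                      /* defined when running sparse */
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned long long __nocast cputime64_t;

/*
 * With the annotation, plain arithmetic between two cputime64_t values
 * still compiles, so helpers such as cputime64_add()/cputime64_sub()
 * can be dropped, while sparse should warn when a bare scalar is mixed
 * in without an explicit conversion macro, e.g.:
 *
 *      idle_time = cur_wall_time - busy_time;   // ok, both cputime64_t
 *      idle_time = cur_wall_time - 100;         // expected sparse warning
 */

The conversion macros (jiffies64_to_cputime64(), jiffies_to_usecs(), and friends) remain necessary, as seen in the hunks below, because they are the explicit crossing points between plain jiffies/microsecond scalars and the annotated cputime types.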
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 29
-rw-r--r-- | drivers/cpufreq/cpufreq_ondemand.c     | 33
-rw-r--r-- | drivers/cpufreq/cpufreq_stats.c        |  5
3 files changed, 32 insertions, 35 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..7f31a031c0b5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -103,15 +103,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	cputime64_t busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
-
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
-
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	busy_time = kstat_cpu(cpu).cpustat.user;
+	busy_time += kstat_cpu(cpu).cpustat.system;
+	busy_time += kstat_cpu(cpu).cpustat.irq;
+	busy_time += kstat_cpu(cpu).cpustat.softirq;
+	busy_time += kstat_cpu(cpu).cpustat.steal;
+	busy_time += kstat_cpu(cpu).cpustat.nice;
+
+	idle_time = cur_wall_time - busy_time;
 
 	if (wall)
 		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
 			cputime64_t cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kstat_cpu(j).cpustat.nice -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d6..07cffe2f6cff 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -127,15 +127,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	cputime64_t busy_time;
 
 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-			kstat_cpu(cpu).cpustat.system);
-
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
-
-	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	busy_time = kstat_cpu(cpu).cpustat.user;
+	busy_time += kstat_cpu(cpu).cpustat.system;
+	busy_time += kstat_cpu(cpu).cpustat.irq;
+	busy_time += kstat_cpu(cpu).cpustat.softirq;
+	busy_time += kstat_cpu(cpu).cpustat.steal;
+	busy_time += kstat_cpu(cpu).cpustat.nice;
+
+	idle_time = cur_wall_time - busy_time;
 
 	if (wall)
 		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-				j_dbs_info->prev_cpu_wall);
+		wall_time = (unsigned int)
+			(cur_wall_time - j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-				j_dbs_info->prev_cpu_idle);
+		idle_time = (unsigned int)
+			(cur_idle_time - j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-				j_dbs_info->prev_cpu_iowait);
+		iowait_time = (unsigned int)
+			(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
 		j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
 		if (dbs_tuners_ins.ignore_nice) {
 			cputime64_t cur_nice;
 			unsigned long cur_nice_jiffies;
 
-			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-					j_dbs_info->prev_cpu_nice);
+			cur_nice = kstat_cpu(j).cpustat.nice -
+					j_dbs_info->prev_cpu_nice;
 			/*
 			 * Assumption: nice time between sampling periods will
 			 * be less than 2^32 jiffies for 32 bit sys
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e848..2a508edd768b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu)
 	spin_lock(&cpufreq_stats_lock);
 	stat = per_cpu(cpufreq_stats_table, cpu);
 	if (stat->time_in_state)
-		stat->time_in_state[stat->last_index] =
-			cputime64_add(stat->time_in_state[stat->last_index],
-				cputime_sub(cur_time, stat->last_time));
+		stat->time_in_state[stat->last_index] +=
+			cur_time - stat->last_time;
 	stat->last_time = cur_time;
 	spin_unlock(&cpufreq_stats_lock);
 	return 0;