Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/arch_topology.c       |  52
-rw-r--r--  drivers/base/power/main.c          | 216
-rw-r--r--  drivers/base/power/runtime.c       |   2
-rw-r--r--  drivers/base/power/sysfs.c         |  15
-rw-r--r--  drivers/base/power/wakeup.c        |   2
-rw-r--r--  drivers/base/power/wakeup_stats.c  |   2
-rw-r--r--  drivers/base/topology.c            |  52
-rw-r--r--  drivers/cpufreq/amd-pstate-ut.c    |  21
-rw-r--r--  drivers/cpufreq/amd-pstate.c       | 120
-rw-r--r--  drivers/cpufreq/amd-pstate.h       |   3
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c     | 109
-rw-r--r--  drivers/cpufreq/cpufreq.c          | 463
-rw-r--r--  drivers/cpufreq/intel_pstate.c     | 181
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c     |  43
-rw-r--r--  drivers/cpuidle/governors/menu.c   |   2
-rw-r--r--  drivers/cpuidle/governors/teo.c    |   4
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c         |   2
-rw-r--r--  drivers/idle/intel_idle.c          | 102
-rw-r--r--  drivers/opp/core.c                 | 428
-rw-r--r--  drivers/opp/cpu.c                  |  30
-rw-r--r--  drivers/opp/of.c                   | 205
-rw-r--r--  drivers/opp/opp.h                  |   1
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c  |   2
23 files changed, 1195 insertions, 862 deletions
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index af0029d30dbe..1037169abb45 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -154,14 +154,6 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 		per_cpu(arch_freq_scale, i) = scale;
 }
 
-DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
-{
-	per_cpu(cpu_scale, cpu) = capacity;
-}
-
 DEFINE_PER_CPU(unsigned long, hw_pressure);
 
 /**
@@ -207,53 +199,9 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
 }
 EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
 
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-
-	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
-}
-
 static void update_topology_flags_workfn(struct work_struct *work);
 static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
 
-static DEVICE_ATTR_RO(cpu_capacity);
-
-static int cpu_capacity_sysctl_add(unsigned int cpu)
-{
-	struct device *cpu_dev = get_cpu_device(cpu);
-
-	if (!cpu_dev)
-		return -ENOENT;
-
-	device_create_file(cpu_dev, &dev_attr_cpu_capacity);
-
-	return 0;
-}
-
-static int cpu_capacity_sysctl_remove(unsigned int cpu)
-{
-	struct device *cpu_dev = get_cpu_device(cpu);
-
-	if (!cpu_dev)
-		return -ENOENT;
-
-	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
-
-	return 0;
-}
-
-static int register_cpu_capacity_sysctl(void)
-{
-	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
-			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
 static int update_topology;
 
 int topology_update_cpu_topology(void)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 95f51a7174c7..19fd55b8ac77 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ static LIST_HEAD(dpm_noirq_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
+static DEFINE_MUTEX(async_wip_mtx);
 static int async_error;
 
 static const char *pm_verb(int event)
@@ -597,8 +598,11 @@ static bool is_async(struct device *dev)
 		&& !pm_trace_is_enabled();
 }
 
-static bool dpm_async_fn(struct device *dev, async_func_t func)
+static bool __dpm_async(struct device *dev, async_func_t func)
 {
+	if (dev->power.work_in_progress)
+		return true;
+
 	if (!is_async(dev))
 		return false;
 
@@ -611,14 +615,37 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
 
 	put_device(dev);
 
+	return false;
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+	guard(mutex)(&async_wip_mtx);
+
+	return __dpm_async(dev, func);
+}
+
+static int dpm_async_with_cleanup(struct device *dev, void *fn)
+{
+	guard(mutex)(&async_wip_mtx);
+
+	if (!__dpm_async(dev, fn))
+		dev->power.work_in_progress = false;
+
+	return 0;
+}
+
+static void dpm_async_resume_children(struct device *dev, async_func_t func)
+{
 	/*
-	 * async_schedule_dev_nocall() above has returned false, so func() is
-	 * not running and it is safe to update power.work_in_progress without
-	 * extra synchronization.
+	 * Start processing "async" children of the device unless it's been
+	 * started already for them.
+	 *
+	 * This could have been done for the device's "async" consumers too, but
+	 * they either need to wait for their parents or the processing has
+	 * already started for them after their parents were processed.
 	 */
-	dev->power.work_in_progress = false;
-
-	return false;
+	device_for_each_child(dev, func, dpm_async_with_cleanup);
 }
 
 static void dpm_clear_async_state(struct device *dev)
@@ -627,6 +654,13 @@
 	dev->power.work_in_progress = false;
 }
 
+static bool dpm_root_device(struct device *dev)
+{
+	return !dev->parent;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
@@ -710,6 +744,8 @@ Out:
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume_noirq);
 }
 
 static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -733,19 +769,20 @@ static void dpm_noirq_resume_devices(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
 	 */
 	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume_noirq);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume_noirq);
 	}
 
 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
 		list_move_tail(&dev->power.entry, &dpm_late_early_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume_noirq)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -781,6 +818,8 @@ void dpm_resume_noirq(pm_message_t state)
 	device_wakeup_disarm_wake_irqs();
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie);
+
 /**
  * device_resume_early - Execute an "early resume" callback for given device.
  * @dev: Device to handle.
@@ -848,6 +887,8 @@ Out:
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async early" : " early", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume_early);
 }
 
 static void async_resume_early(void *data, async_cookie_t cookie)
@@ -875,19 +916,20 @@ void dpm_resume_early(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
 	 */
 	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume_early);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume_early);
 	}
 
 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume_early)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -919,6 +961,8 @@ void dpm_resume_start(pm_message_t state)
 }
 EXPORT_SYMBOL_GPL(dpm_resume_start);
 
+static void async_resume(void *data, async_cookie_t cookie);
+
 /**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
@@ -1018,6 +1062,8 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		dpm_save_failed_dev(dev_name(dev));
 		pm_dev_err(dev, state, async ? " async" : "", error);
 	}
+
+	dpm_async_resume_children(dev, async_resume);
 }
 
 static void async_resume(void *data, async_cookie_t cookie)
@@ -1049,19 +1095,20 @@ void dpm_resume(pm_message_t state)
 	mutex_lock(&dpm_list_mtx);
 
 	/*
-	 * Trigger the resume of "async" devices upfront so they don't have to
-	 * wait for the "non-async" ones they don't depend on.
+	 * Start processing "async" root devices upfront so they don't wait for
+	 * the "sync" devices they don't depend on.
 	 */
 	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 		dpm_clear_async_state(dev);
-		dpm_async_fn(dev, async_resume);
+		if (dpm_root_device(dev))
+			dpm_async_with_cleanup(dev, async_resume);
 	}
 
 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
 		list_move_tail(&dev->power.entry, &dpm_prepared_list);
 
-		if (!dev->power.work_in_progress) {
+		if (!dpm_async_fn(dev, async_resume)) {
 			get_device(dev);
 
 			mutex_unlock(&dpm_list_mtx);
@@ -1189,6 +1236,41 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
 
 /*------------------------- Suspend routines -------------------------*/
 
+static bool dpm_leaf_device(struct device *dev)
+{
+	struct device *child;
+
+	lockdep_assert_held(&dpm_list_mtx);
+
+	child = device_find_any_child(dev);
+	if (child) {
+		put_device(child);
+
+		return false;
+	}
+
+	return true;
+}
+
+static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+{
+	guard(mutex)(&dpm_list_mtx);
+
+	/*
+	 * If the device is suspended asynchronously and the parent's callback
+	 * deletes both the device and the parent itself, the parent object may
+	 * be freed while this function is running, so avoid that by checking
+	 * if the device has been deleted already as the parent cannot be
+	 * deleted before it.
+	 */
+	if (!device_pm_initialized(dev))
+		return;
+
+	/* Start processing the device's parent if it is "async". */
+	if (dev->parent)
+		dpm_async_with_cleanup(dev->parent, func);
+}
+
 /**
  * resume_event - Return a "resume" message for given "suspend" sleep state.
  * @sleep_state: PM message representing a sleep state.
@@ -1226,6 +1308,8 @@ static void dpm_superior_set_must_resume(struct device *dev)
 	device_links_read_unlock(idx);
 }
 
+static void async_suspend_noirq(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
  * @dev: Device to handle.
@@ -1304,7 +1388,13 @@ Skip:
 
 Complete:
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend_noirq);
+
+	return 0;
 }
 
 static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1318,6 +1408,7 @@
 static int dpm_noirq_suspend_devices(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1327,12 +1418,21 @@
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend_noirq);
+	}
+
 	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.prev);
+		dev = to_device(dpm_late_early_list.prev);
 
 		list_move(&dev->power.entry, &dpm_noirq_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_noirq))
 			continue;
 
@@ -1346,8 +1446,14 @@
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_late_early_list, &dpm_noirq_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1400,6 +1506,8 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
 	spin_unlock_irq(&parent->power.lock);
 }
 
+static void async_suspend_late(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend_late - Execute a "late suspend" callback for given device.
  * @dev: Device to handle.
@@ -1476,7 +1584,13 @@ Skip:
 
 Complete:
 	TRACE_SUSPEND(error);
 	complete_all(&dev->power.completion);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend_late);
+
+	return 0;
 }
 
 static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1494,6 +1608,7 @@
 int dpm_suspend_late(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1505,12 +1620,21 @@
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend_late);
+	}
+
 	while (!list_empty(&dpm_suspended_list)) {
-		struct device *dev = to_device(dpm_suspended_list.prev);
+		dev = to_device(dpm_suspended_list.prev);
 
 		list_move(&dev->power.entry, &dpm_late_early_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend_late))
 			continue;
 
@@ -1524,8 +1648,14 @@
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_suspended_list, &dpm_late_early_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
@@ -1614,6 +1744,8 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
 	device_links_read_unlock(idx);
 }
 
+static void async_suspend(void *data, async_cookie_t cookie);
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -1743,7 +1875,13 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
 
 	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
-	return error;
+
+	if (error || async_error)
+		return error;
+
+	dpm_async_suspend_parent(dev, async_suspend);
+
+	return 0;
 }
 
 static void async_suspend(void *data, async_cookie_t cookie)
@@ -1761,6 +1899,7 @@
 int dpm_suspend(pm_message_t state)
 {
 	ktime_t starttime = ktime_get();
+	struct device *dev;
 	int error = 0;
 
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
@@ -1774,12 +1913,21 @@
 
 	mutex_lock(&dpm_list_mtx);
 
+	/*
+	 * Start processing "async" leaf devices upfront so they don't need to
+	 * wait for the "sync" devices they don't depend on.
+	 */
+	list_for_each_entry_reverse(dev, &dpm_prepared_list, power.entry) {
+		dpm_clear_async_state(dev);
+		if (dpm_leaf_device(dev))
+			dpm_async_with_cleanup(dev, async_suspend);
+	}
+
 	while (!list_empty(&dpm_prepared_list)) {
-		struct device *dev = to_device(dpm_prepared_list.prev);
+		dev = to_device(dpm_prepared_list.prev);
 
 		list_move(&dev->power.entry, &dpm_suspended_list);
 
-		dpm_clear_async_state(dev);
 		if (dpm_async_fn(dev, async_suspend))
 			continue;
 
@@ -1793,8 +1941,14 @@
 
 		mutex_lock(&dpm_list_mtx);
 
-		if (error || async_error)
+		if (error || async_error) {
+			/*
+			 * Move all devices to the target list to resume them
+			 * properly.
+			 */
+			list_splice(&dpm_prepared_list, &dpm_suspended_list);
 			break;
+		}
 	}
 
 	mutex_unlock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 205a4f8828b0..c55a7c70bc1a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1011,7 +1011,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	 * If 'expires' is after the current time, we've been called
 	 * too early.
 	 */
-	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+	if (expires > 0 && expires <= ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 			    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index f84018125b46..13b31a3adc77 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -611,15 +611,9 @@ static DEVICE_ATTR_RW(async);
 #endif /* CONFIG_PM_ADVANCED_DEBUG */
 
 static struct attribute *power_attrs[] = {
-#ifdef CONFIG_PM_ADVANCED_DEBUG
-#ifdef CONFIG_PM_SLEEP
+#if defined(CONFIG_PM_ADVANCED_DEBUG) && defined(CONFIG_PM_SLEEP)
 	&dev_attr_async.attr,
 #endif
-	&dev_attr_runtime_status.attr,
-	&dev_attr_runtime_usage.attr,
-	&dev_attr_runtime_active_kids.attr,
-	&dev_attr_runtime_enabled.attr,
-#endif /* CONFIG_PM_ADVANCED_DEBUG */
 	NULL,
 };
 static const struct attribute_group pm_attr_group = {
@@ -650,13 +644,16 @@ static const struct attribute_group pm_wakeup_attr_group = {
 };
 
 static struct attribute *runtime_attrs[] = {
-#ifndef CONFIG_PM_ADVANCED_DEBUG
 	&dev_attr_runtime_status.attr,
-#endif
 	&dev_attr_control.attr,
 	&dev_attr_runtime_suspended_time.attr,
 	&dev_attr_runtime_active_time.attr,
 	&dev_attr_autosuspend_delay_ms.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+	&dev_attr_runtime_usage.attr,
+	&dev_attr_runtime_active_kids.attr,
+	&dev_attr_runtime_enabled.attr,
+#endif
 	NULL,
 };
 static const struct attribute_group pm_runtime_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 63bf914a4d44..7e612977be1b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -337,7 +337,7 @@ int device_wakeup_enable(struct device *dev)
 	if (!dev || !dev->power.can_wakeup)
 		return -EINVAL;
 
-	if (pm_suspend_target_state != PM_SUSPEND_ON)
+	if (pm_sleep_transition_in_progress())
 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
 
 	ws = wakeup_source_register(dev, dev_name(dev));
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
index 6732ed2869f9..3ffd427248e8 100644
--- a/drivers/base/power/wakeup_stats.c
+++ b/drivers/base/power/wakeup_stats.c
@@ -34,6 +34,7 @@ wakeup_attr(active_count);
 wakeup_attr(event_count);
 wakeup_attr(wakeup_count);
 wakeup_attr(expire_count);
+wakeup_attr(relax_count);
 
 static ssize_t active_time_ms_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
@@ -119,6 +120,7 @@ static struct attribute *wakeup_source_attrs[] = {
 	&dev_attr_event_count.attr,
 	&dev_attr_wakeup_count.attr,
 	&dev_attr_expire_count.attr,
+	&dev_attr_relax_count.attr,
 	&dev_attr_active_time_ms.attr,
 	&dev_attr_total_time_ms.attr,
 	&dev_attr_max_time_ms.attr,
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index b962da263eee..8b42df05feff 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -208,3 +208,55 @@ static int __init topology_sysfs_init(void)
 }
 
 device_initcall(topology_sysfs_init);
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int cpu_capacity_sysctl_add(unsigned int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+
+	if (!cpu_dev)
+		return -ENOENT;
+
+	device_create_file(cpu_dev, &dev_attr_cpu_capacity);
+
+	return 0;
+}
+
+static int cpu_capacity_sysctl_remove(unsigned int cpu)
+{
+	struct device *cpu_dev = get_cpu_device(cpu);
+
+	if (!cpu_dev)
+		return -ENOENT;
+
+	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);
+
+	return 0;
+}
+
+static int register_cpu_capacity_sysctl(void)
+{
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
+			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index c8d031b297d2..447b9aa5ce40 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -244,25 +244,30 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
 static int amd_pstate_ut_check_driver(u32 index)
 {
 	enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
+	enum amd_pstate_mode orig_mode = amd_pstate_get_status();
+	int ret;
 
 	for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
-		int ret = amd_pstate_set_mode(mode1);
+		ret = amd_pstate_set_mode(mode1);
 		if (ret)
 			return ret;
 		for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
 			if (mode1 == mode2)
 				continue;
 
 			ret = amd_pstate_set_mode(mode2);
-			if (ret) {
-				pr_err("%s: failed to update status for %s->%s\n", __func__,
-					amd_pstate_get_mode_string(mode1),
-					amd_pstate_get_mode_string(mode2));
-				return ret;
-			}
+			if (ret)
+				goto out;
 		}
 	}
 
-	return 0;
+out:
+	if (ret)
+		pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
+			amd_pstate_get_mode_string(mode1),
+			amd_pstate_get_mode_string(mode2), ret);
+
+	amd_pstate_set_mode(orig_mode);
+	return ret;
 }
 
 static int __init amd_pstate_ut_init(void)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 47d67c40e4f7..f3477ab37742 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -389,7 +389,8 @@ static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
 static int msr_init_perf(struct amd_cpudata *cpudata)
 {
 	union perf_cached perf = READ_ONCE(cpudata->perf);
-	u64 cap1, numerator;
+	u64 cap1, numerator, cppc_req;
+	u8 min_perf;
 
 	int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &cap1);
@@ -400,6 +401,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
 	if (ret)
 		return ret;
 
+	ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
+	if (ret)
+		return ret;
+
+	WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
+	min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
+
+	/*
+	 * Clear out the min_perf part to check if the rest of the MSR is 0, if yes, this is an
+	 * indication that the min_perf value is the one specified through the BIOS option
+	 */
+	cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
+
+	if (!cppc_req)
+		perf.bios_min_perf = min_perf;
+
 	perf.highest_perf = numerator;
 	perf.max_limit_perf = numerator;
 	perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
@@ -555,6 +572,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 	if (!policy)
 		return;
 
+	/* limit the max perf when core performance boost feature is disabled */
+	if (!cpudata->boost_supported)
+		max_perf = min_t(u8, perf.nominal_perf, max_perf);
+
 	des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
 
 	policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
@@ -564,10 +585,6 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
 		des_perf = 0;
 	}
 
-	/* limit the max perf when core performance boost feature is disabled */
-	if (!cpudata->boost_supported)
-		max_perf = min_t(u8, perf.nominal_perf, max_perf);
-
 	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
 		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
 			cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
@@ -581,20 +598,26 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
 {
 	/*
 	 * Initialize lower frequency limit (i.e.policy->min) with
-	 * lowest_nonlinear_frequency which is the most energy efficient
-	 * frequency. Override the initial value set by cpufreq core and
-	 * amd-pstate qos_requests.
+	 * lowest_nonlinear_frequency or the min frequency specified in BIOS
+	 * (if any). Override the initial value set by cpufreq core and
+	 * amd-pstate qos_requests.
 	 */
 	if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
 		struct cpufreq_policy *policy __free(put_cpufreq_policy) =
 					      cpufreq_cpu_get(policy_data->cpu);
 		struct amd_cpudata *cpudata;
+		union perf_cached perf;
 
 		if (!policy)
 			return -EINVAL;
 
 		cpudata = policy->driver_data;
-		policy_data->min = cpudata->lowest_nonlinear_freq;
+		perf = READ_ONCE(cpudata->perf);
+
+		if (perf.bios_min_perf)
+			policy_data->min = perf_to_freq(perf, cpudata->nominal_freq,
+							perf.bios_min_perf);
+		else
+			policy_data->min = cpudata->lowest_nonlinear_freq;
 	}
 
 	cpufreq_verify_within_cpu_limits(policy_data);
@@ -809,19 +832,16 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
 	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
 }
 
-static void amd_pstate_update_limits(unsigned int cpu)
+static void amd_pstate_update_limits(struct cpufreq_policy *policy)
 {
-	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
 	struct amd_cpudata *cpudata;
 	u32 prev_high = 0, cur_high = 0;
 	bool highest_perf_changed = false;
+	unsigned int cpu = policy->cpu;
 
 	if (!amd_pstate_prefcore)
 		return;
 
-	if (!policy)
-		return;
-
 	if (amd_get_highest_perf(cpu, &cur_high))
 		return;
 
@@ -1027,6 +1047,10 @@ free_cpudata1:
 static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+
+	/* Reset CPPC_REQ MSR to the BIOS value */
+	amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
 
 	freq_qos_remove_request(&cpudata->req[1]);
 	freq_qos_remove_request(&cpudata->req[0]);
@@ -1308,6 +1332,12 @@ static ssize_t amd_pstate_show_status(char *buf)
 	return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
 }
 
+int amd_pstate_get_status(void)
+{
+	return cppc_state;
+}
+EXPORT_SYMBOL_GPL(amd_pstate_get_status);
+
 int amd_pstate_update_status(const char *buf, size_t size)
 {
 	int mode_idx;
@@ -1422,7 +1452,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	struct amd_cpudata *cpudata;
 	union perf_cached perf;
 	struct device *dev;
-	u64 value;
 	int ret;
 
 	/*
@@ -1487,12 +1516,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 		cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
-		ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
-		if (ret)
-			return ret;
-		WRITE_ONCE(cpudata->cppc_req_cached, value);
-	}
 	ret = amd_pstate_set_epp(policy, cpudata->epp_default);
 	if (ret)
 		return ret;
@@ -1512,6 +1535,11 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 	struct amd_cpudata *cpudata = policy->driver_data;
 
 	if (cpudata) {
+		union perf_cached perf = READ_ONCE(cpudata->perf);
+
+		/* Reset CPPC_REQ MSR to the BIOS value */
+		amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+
 		kfree(cpudata);
 		policy->driver_data = NULL;
 	}
@@ -1562,21 +1590,38 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_online(struct cpufreq_policy *policy)
 {
-	pr_debug("AMD CPU Core %d going online\n", policy->cpu);
-
 	return amd_pstate_cppc_enable(policy);
 }
 
-static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
 {
-	return 0;
+	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+
+	/*
+	 * Reset CPPC_REQ MSR to the BIOS value, this will allow us to retain the BIOS specified
+	 * min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
+	 * limits, epp and desired perf will get reset to the cached values in cpudata struct
+	 */
+	return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
 }
 
-static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+static int amd_pstate_suspend(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+	int ret;
+
+	/*
+	 * Reset CPPC_REQ MSR to the BIOS value, this will allow us to retain the BIOS specified
+	 * min_perf value across kexec reboots. If this CPU is just resumed back without kexec,
+	 * the limits, epp and desired perf will get reset to the cached values in cpudata struct
+	 */
+	ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+	if (ret)
+		return ret;
 
 	/* invalidate to ensure it's rewritten during resume */
 	cpudata->cppc_req_cached = 0;
@@ -1587,6 +1632,17 @@
 	return 0;
 }
 
+static int amd_pstate_resume(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = policy->driver_data;
+	union perf_cached perf = READ_ONCE(cpudata->perf);
+	int cur_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->cur);
+
+	/* Set CPPC_REQ to last sane value until the governor updates it */
+	return amd_pstate_update_perf(policy, perf.min_limit_perf, cur_perf, perf.max_limit_perf,
+				      0U, false);
+}
+
 static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
 {
 	struct amd_cpudata *cpudata = policy->driver_data;
@@ -1612,6 +1668,10 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.fast_switch	= amd_pstate_fast_switch,
 	.init		= amd_pstate_cpu_init,
 	.exit		= amd_pstate_cpu_exit,
+	.online		= amd_pstate_cpu_online,
+	.offline	= amd_pstate_cpu_offline,
+	.suspend	= amd_pstate_suspend,
+	.resume		= amd_pstate_resume,
 	.set_boost	= amd_pstate_set_boost,
 	.update_limits	= amd_pstate_update_limits,
 	.name		= "amd-pstate",
@@ -1624,9 +1684,9 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.setpolicy	= amd_pstate_epp_set_policy,
 	.init		= amd_pstate_epp_cpu_init,
 	.exit		= amd_pstate_epp_cpu_exit,
-	.offline	= amd_pstate_epp_cpu_offline,
-	.online		= amd_pstate_epp_cpu_online,
-	.suspend	= amd_pstate_epp_suspend,
+	.offline	= amd_pstate_cpu_offline,
+	.online		= amd_pstate_cpu_online,
+	.suspend	= amd_pstate_suspend,
 	.resume		= amd_pstate_epp_resume,
 	.update_limits	= amd_pstate_update_limits,
 	.set_boost	= amd_pstate_set_boost,
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index fbe1c08d3f06..cb45fdca27a6 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -30,6 +30,7 @@
  * @lowest_perf: the absolute lowest performance level of the processor
  * @min_limit_perf: Cached value of the performance corresponding to policy->min
  * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ * @bios_min_perf: Cached perf value corresponding to the "Requested CPU Min Frequency" BIOS option
  */
 union perf_cached {
 	struct {
@@ -39,6 +40,7 @@ union perf_cached {
 		u8	lowest_perf;
 		u8	min_limit_perf;
 		u8	max_limit_perf;
+		u8	bios_min_perf;
 	};
 	u64	val;
 };
@@ -119,6 +121,7 @@ enum amd_pstate_mode {
 	AMD_PSTATE_MAX,
 };
 const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode);
+int amd_pstate_get_status(void);
 int amd_pstate_update_status(const char *buf, size_t size);
 
 #endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index cb93f00bafdb..b7c688a5659c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -808,10 +808,119 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 
 	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
 }
+
+static ssize_t show_auto_select(struct cpufreq_policy *policy, char *buf)
+{
+	bool val;
+	int ret;
+
+	ret = cppc_get_auto_sel(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t store_auto_select(struct cpufreq_policy *policy,
+				 const char *buf, size_t count)
+{
+	bool val;
+	int ret;
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_auto_sel(policy->cpu, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t show_auto_act_window(struct cpufreq_policy *policy, char *buf)
+{
+	u64 val;
+	int ret;
+
+	ret = cppc_get_auto_act_window(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_auto_act_window(struct cpufreq_policy *policy,
+				     const char *buf, size_t count)
+{
+	u64 usec;
+	int ret;
+
+	ret = kstrtou64(buf, 0, &usec);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_auto_act_window(policy->cpu, usec);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t show_energy_performance_preference_val(struct cpufreq_policy *policy, char *buf)
+{
+	u64 val;
+	int ret;
+
+	ret = cppc_get_epp_perf(policy->cpu, &val);
+
+	/* show "<unsupported>" when this register is not supported by cpc */
+	if (ret == -EOPNOTSUPP)
+		return sysfs_emit(buf, "<unsupported>\n");
+
+	if (ret)
+		return ret;
+
+	return sysfs_emit(buf, "%llu\n", val);
+}
+
+static ssize_t store_energy_performance_preference_val(struct cpufreq_policy *policy,
+						       const char *buf, size_t count)
+{
+	u64 val;
+	int ret;
+
+	ret = kstrtou64(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ret = cppc_set_epp(policy->cpu, val);
+	if (ret)
+		return ret;
+
+	return count;
+}
+
 cpufreq_freq_attr_ro(freqdomain_cpus);
+cpufreq_freq_attr_rw(auto_select);
+cpufreq_freq_attr_rw(auto_act_window);
+cpufreq_freq_attr_rw(energy_performance_preference_val);
 
 static struct freq_attr *cppc_cpufreq_attr[] = {
 	&freqdomain_cpus,
+	&auto_select,
+	&auto_act_window,
+	&energy_performance_preference_val,
 	NULL,
 };
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f45ded62b0e0..d7426e1d8bdd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -255,51 +255,6 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
-/**
- * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
- * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
- */
-void cpufreq_cpu_release(struct cpufreq_policy *policy)
-{
-	if (WARN_ON(!policy))
-		return;
-
-	lockdep_assert_held(&policy->rwsem);
-
-	up_write(&policy->rwsem);
-
-	cpufreq_cpu_put(policy);
-}
-
-/**
- * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
- * @cpu: CPU to find the policy for.
- *
- * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
- * if the policy returned by it is not NULL, acquire its rwsem for writing.
- * Return the policy if it is active or release it and return NULL otherwise.
- *
- * The policy returned by this function has to be released with the help of
- * cpufreq_cpu_release() in order to release its rwsem and balance its usage
- * counter properly.
- */
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
-{
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
-	if (!policy)
-		return NULL;
-
-	down_write(&policy->rwsem);
-
-	if (policy_is_inactive(policy)) {
-		cpufreq_cpu_release(policy);
-		return NULL;
-	}
-
-	return policy;
-}
-
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
  *********************************************************************/
@@ -636,6 +591,22 @@ static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
 	return sysfs_emit(buf, "%d\n", policy->boost_enabled);
 }
 
+static int policy_set_boost(struct cpufreq_policy *policy, bool enable)
+{
+	int ret;
+
+	if (policy->boost_enabled == enable)
+		return 0;
+
+	policy->boost_enabled = enable;
+
+	ret = cpufreq_driver->set_boost(policy, enable);
+	if (ret)
+		policy->boost_enabled = !policy->boost_enabled;
+
+	return ret;
+}
+
 static ssize_t store_local_boost(struct cpufreq_policy *policy,
 				 const char *buf, size_t count)
 {
@@ -651,21 +622,11 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
 	if (!policy->boost_supported)
 		return -EINVAL;
 
-	if (policy->boost_enabled == enable)
+	ret = policy_set_boost(policy, enable);
+	if (!ret)
 		return count;
 
-	policy->boost_enabled = enable;
-
-	cpus_read_lock();
-	ret = cpufreq_driver->set_boost(policy, enable);
-	cpus_read_unlock();
-
-	if (ret) {
-		policy->boost_enabled = !policy->boost_enabled;
-		return ret;
-	}
-
-	return count;
+	return ret;
 }
 
 static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
@@ -845,7 +806,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 				      const char *buf, size_t count)
 {
-	char str_governor[16];
+	char str_governor[CPUFREQ_NAME_LEN];
 	int ret;
 
 	ret = sscanf(buf, "%15s", str_governor);
@@ -956,9 +917,9 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 	if (!policy->governor || !policy->governor->store_setspeed)
 		return -EINVAL;
 
-	ret = sscanf(buf, "%u", &freq);
-	if (ret != 1)
-		return -EINVAL;
+	ret = kstrtouint(buf, 0, &freq);
+	if (ret)
+		return ret;
 
 	policy->governor->store_setspeed(policy, freq);
 
@@ -1025,17 +986,16 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
-	ssize_t ret = -EBUSY;
 
 	if (!fattr->show)
 		return -EIO;
 
-	down_read(&policy->rwsem);
+	guard(cpufreq_policy_read)(policy);
+
 	if (likely(!policy_is_inactive(policy)))
-		ret = fattr->show(policy, buf);
-	up_read(&policy->rwsem);
+		return fattr->show(policy, buf);
 
-	return ret;
+	return -EBUSY;
 }
 
 static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -1043,17 +1003,16 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 {
 	struct cpufreq_policy *policy = to_policy(kobj);
 	struct freq_attr *fattr = to_attr(attr);
-	ssize_t ret = -EBUSY;
 
 	if (!fattr->store)
 		return -EIO;
 
-	down_write(&policy->rwsem);
+	guard(cpufreq_policy_write)(policy);
+
 	if (likely(!policy_is_inactive(policy)))
-		ret = fattr->store(policy, buf, count);
-	up_write(&policy->rwsem);
+		return fattr->store(policy, buf, count);
 
-	return ret;
+	return -EBUSY;
 }
 
 static void cpufreq_sysfs_release(struct kobject *kobj)
@@ -1211,7 +1170,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 	if (cpumask_test_cpu(cpu, policy->cpus))
 		return 0;
 
-	down_write(&policy->rwsem);
+	guard(cpufreq_policy_write)(policy);
+
 	if (has_target())
 		cpufreq_stop_governor(policy);
 
@@ -1222,7 +1182,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
 		if (ret)
 			pr_err("%s: Failed to start governor\n", __func__);
 	}
-	up_write(&policy->rwsem);
+
 	return ret;
 }
 
@@ -1242,9 +1202,10 @@ static void handle_update(struct work_struct *work)
 		container_of(work, struct cpufreq_policy, update);
 
 	pr_debug("handle_update for cpu %u called\n", policy->cpu);
-	down_write(&policy->rwsem);
+
+	guard(cpufreq_policy_write)(policy);
+
 	refresh_frequency_limits(policy);
-	up_write(&policy->rwsem);
 }
 
 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
@@ -1270,11 +1231,11 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
 	struct kobject *kobj;
 	struct completion *cmp;
 
-	down_write(&policy->rwsem);
-	cpufreq_stats_free_table(policy);
-	kobj = &policy->kobj;
-	cmp = &policy->kobj_unregister;
-	up_write(&policy->rwsem);
+	scoped_guard(cpufreq_policy_write, policy) {
+		cpufreq_stats_free_table(policy);
+		kobj = &policy->kobj;
+		cmp = &policy->kobj_unregister;
+	}
 	kobject_put(kobj);
 
 	/*
@@ -1350,7 +1311,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	init_waitqueue_head(&policy->transition_wait);
 	INIT_WORK(&policy->update, handle_update);
 
-	policy->cpu = cpu;
 	return policy;
 
 err_min_qos_notifier:
@@ -1419,35 +1379,17 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 	kfree(policy);
 }
 
-static int cpufreq_online(unsigned int cpu)
+static int cpufreq_policy_online(struct cpufreq_policy *policy,
+				 unsigned int cpu, bool new_policy)
 {
-	struct cpufreq_policy *policy;
-	bool new_policy;
 	unsigned long flags;
 	unsigned int j;
 	int ret;
 
-	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
-
-	/* Check if this CPU already has a policy to manage it */
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (policy) {
-		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
-		if (!policy_is_inactive(policy))
-			return cpufreq_add_policy_cpu(policy, cpu);
+	guard(cpufreq_policy_write)(policy);
 
-		/* This is the only online CPU for the policy. Start over. */
-		new_policy = false;
-		down_write(&policy->rwsem);
-		policy->cpu = cpu;
-		policy->governor = NULL;
-	} else {
-		new_policy = true;
-		policy = cpufreq_policy_alloc(cpu);
-		if (!policy)
-			return -ENOMEM;
-		down_write(&policy->rwsem);
-	}
+	policy->cpu = cpu;
+	policy->governor = NULL;
 
 	if (!new_policy && cpufreq_driver->online) {
 		/* Recover policy->cpus using related_cpus */
@@ -1470,7 +1412,7 @@ static int cpufreq_online(unsigned int cpu)
 	if (ret) {
 		pr_debug("%s: %d: initialization failed\n", __func__,
 			 __LINE__);
-		goto out_free_policy;
+		goto out_clear_policy;
 	}
 
 	/*
@@ -1621,7 +1563,55 @@ static int cpufreq_online(unsigned int cpu)
 			goto out_destroy_policy;
 	}
 
-	up_write(&policy->rwsem);
+	return 0;
+
+out_destroy_policy:
+	for_each_cpu(j, policy->real_cpus)
+		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
+
+out_offline_policy:
+	if (cpufreq_driver->offline)
+		cpufreq_driver->offline(policy);
+
+out_exit_policy:
+	if (cpufreq_driver->exit)
+		cpufreq_driver->exit(policy);
+
+out_clear_policy:
+	cpumask_clear(policy->cpus);
+
+	return ret;
+}
+
+static int cpufreq_online(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	bool new_policy;
+	int ret;
+
+	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
+
+	/* Check if this CPU already has a policy to manage it */
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	if (policy) {
+		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
+		if (!policy_is_inactive(policy))
+			return cpufreq_add_policy_cpu(policy, cpu);
+
+		/* This is the only online CPU for the policy. Start over. */
+		new_policy = false;
+	} else {
+		new_policy = true;
+		policy = cpufreq_policy_alloc(cpu);
+		if (!policy)
+			return -ENOMEM;
+	}
+
+	ret = cpufreq_policy_online(policy, cpu, new_policy);
+	if (ret) {
+		cpufreq_policy_free(policy);
+		return ret;
+	}
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 
@@ -1633,41 +1623,24 @@ static int cpufreq_online(unsigned int cpu)
 	if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
 		policy->cdev = of_cpufreq_cooling_register(policy);
 
-	/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+	/*
+	 * Let the per-policy boost flag mirror the cpufreq_driver boost during
+	 * initialization for a new policy. For an existing policy, maintain the
+	 * previous boost value unless global boost is disabled.
+	 */
 	if (cpufreq_driver->set_boost && policy->boost_supported &&
-	    policy->boost_enabled != cpufreq_boost_enabled()) {
-		policy->boost_enabled = cpufreq_boost_enabled();
-		ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
+	    (new_policy || !cpufreq_boost_enabled())) {
+		ret = policy_set_boost(policy, cpufreq_boost_enabled());
 		if (ret) {
 			/* If the set_boost fails, the online operation is not affected */
 			pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
-				str_enable_disable(policy->boost_enabled));
-			policy->boost_enabled = !policy->boost_enabled;
+				str_enable_disable(cpufreq_boost_enabled()));
 		}
 	}
 
 	pr_debug("initialization complete\n");
 
 	return 0;
-
-out_destroy_policy:
-	for_each_cpu(j, policy->real_cpus)
-		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
-
-out_offline_policy:
-	if (cpufreq_driver->offline)
-		cpufreq_driver->offline(policy);
-
-out_exit_policy:
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
-
-out_free_policy:
-	cpumask_clear(policy->cpus);
-	up_write(&policy->rwsem);
-
-	cpufreq_policy_free(policy);
-	return ret;
 }
 
 /**
@@ -1757,11 +1730,10 @@ static int cpufreq_offline(unsigned int cpu)
 		return 0;
 	}
 
-	down_write(&policy->rwsem);
+	guard(cpufreq_policy_write)(policy);
 
 	__cpufreq_offline(cpu, policy);
 
-	up_write(&policy->rwsem);
-
 	return 0;
 }
 
@@ -1778,33 +1750,29 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (!policy)
 		return;
 
-	down_write(&policy->rwsem);
+	scoped_guard(cpufreq_policy_write, policy) {
+		if (cpu_online(cpu))
+			__cpufreq_offline(cpu, policy);
 
-	if (cpu_online(cpu))
-		__cpufreq_offline(cpu, policy);
+		remove_cpu_dev_symlink(policy, cpu, dev);
 
-	remove_cpu_dev_symlink(policy, cpu, dev);
+		if (!cpumask_empty(policy->real_cpus))
+			return;
 
-	if (!cpumask_empty(policy->real_cpus)) {
-		up_write(&policy->rwsem);
-		return;
-	}
+		/*
+		 * Unregister cpufreq cooling once all the CPUs of the policy
+		 * are removed.
+		 */
+		if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+			cpufreq_cooling_unregister(policy->cdev);
+			policy->cdev = NULL;
+		}
 
-	/*
-	 * Unregister cpufreq cooling once all the CPUs of the policy are
-	 * removed.
-	 */
-	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
-		cpufreq_cooling_unregister(policy->cdev);
-		policy->cdev = NULL;
+		/* We did light-weight exit earlier, do full tear down now */
+		if (cpufreq_driver->offline && cpufreq_driver->exit)
+			cpufreq_driver->exit(policy);
 	}
 
-	/* We did light-weight exit earlier, do full tear down now */
-	if (cpufreq_driver->offline && cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
-
-	up_write(&policy->rwsem);
-
 	cpufreq_policy_free(policy);
 }
 
@@ -1874,27 +1842,26 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
 */
 unsigned int cpufreq_quick_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy;
-	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
 	unsigned long flags;
 
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
-		ret_freq = cpufreq_driver->get(cpu);
+		unsigned int ret_freq = cpufreq_driver->get(cpu);
+
 		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 		return ret_freq;
 	}
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	policy = cpufreq_cpu_get(cpu);
-	if (policy) {
-		ret_freq = policy->cur;
-		cpufreq_cpu_put(policy);
-	}
+	if (policy)
+		return policy->cur;
 
-	return ret_freq;
+	return 0;
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
@@ -1906,15 +1873,13 @@ EXPORT_SYMBOL(cpufreq_quick_get);
 */
 unsigned int cpufreq_quick_get_max(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
 
-	if (policy) {
-		ret_freq = policy->max;
-		cpufreq_cpu_put(policy);
-	}
+	policy = cpufreq_cpu_get(cpu);
+	if (policy)
+		return policy->max;
 
-	return ret_freq;
+	return 0;
 }
 EXPORT_SYMBOL(cpufreq_quick_get_max);
 
@@ -1926,15 +1891,13 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
 */
 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
 
-	if (policy) {
-		ret_freq = policy->cpuinfo.max_freq;
-		cpufreq_cpu_put(policy);
-	}
+	policy = cpufreq_cpu_get(cpu);
+	if (policy)
+		return policy->cpuinfo.max_freq;
 
-	return ret_freq;
+	return 0;
 }
 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
 
@@ -1954,19 +1917,18 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
 
-	if (policy) {
-		down_read(&policy->rwsem);
-		if (cpufreq_driver->get)
-			ret_freq = __cpufreq_get(policy);
-		up_read(&policy->rwsem);
+	policy = cpufreq_cpu_get(cpu);
+	if (!policy)
+		return 0;
 
-		cpufreq_cpu_put(policy);
-	}
+	guard(cpufreq_policy_read)(policy);
+
+	if (cpufreq_driver->get)
+		return __cpufreq_get(policy);
 
-	return ret_freq;
+	return 0;
 }
 EXPORT_SYMBOL(cpufreq_get);
 
@@ -2025,9 +1987,9 @@ void cpufreq_suspend(void)
 
 	for_each_active_policy(policy) {
 		if (has_target()) {
-			down_write(&policy->rwsem);
-			cpufreq_stop_governor(policy);
-			up_write(&policy->rwsem);
+			scoped_guard(cpufreq_policy_write, policy) {
+				cpufreq_stop_governor(policy);
+			}
 		}
 
 		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2068,9 +2030,9 @@ void cpufreq_resume(void)
 			pr_err("%s: Failed to resume driver: %s\n", __func__,
 				cpufreq_driver->name);
 		} else if (has_target()) {
-			down_write(&policy->rwsem);
-			ret = cpufreq_start_governor(policy);
-			up_write(&policy->rwsem);
+			scoped_guard(cpufreq_policy_write, policy) {
+				ret = cpufreq_start_governor(policy);
+			}
 
 			if (ret)
 				pr_err("%s: Failed to start governor for CPU%u's policy\n",
@@ -2438,15 +2400,9 @@
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation)
 {
-	int ret;
-
-	down_write(&policy->rwsem);
+	guard(cpufreq_policy_write)(policy);
 
-	ret = __cpufreq_driver_target(policy, target_freq, relation);
-
-	up_write(&policy->rwsem);
-
-	return ret;
+	return __cpufreq_driver_target(policy, target_freq, relation);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
@@ -2618,31 +2574,6 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
  *                          POLICY INTERFACE                         *
  *********************************************************************/
 
-/**
- * cpufreq_get_policy - get the current cpufreq_policy
- * @policy: struct cpufreq_policy into which the current cpufreq_policy
- *	is written
- * @cpu: CPU to find the policy for
- *
- * Reads the current cpufreq policy.
- */
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
-{
-	struct cpufreq_policy *cpu_policy;
-
-	if (!policy)
-		return -EINVAL;
-
-	cpu_policy = cpufreq_cpu_get(cpu);
-	if (!cpu_policy)
-		return -EINVAL;
-
-	memcpy(policy, cpu_policy, sizeof(*policy));
-
-	cpufreq_cpu_put(cpu_policy);
-	return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_policy);
-
 DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
 
 /**
@@ -2793,6 +2724,21 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	return ret;
 }
 
+static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
+{
+	guard(cpufreq_policy_write)(policy);
+
+	/*
+	 * BIOS might change freq behind our back
+	 * -> ask driver for current freq and notify governors about a change
+	 */
+	if (cpufreq_driver->get && has_target() &&
+	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
+		return;
+
+	refresh_frequency_limits(policy);
+}
+
 /**
  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
  * @cpu: CPU to re-evaluate the policy for.
@@ -2804,23 +2750,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 */
 void cpufreq_update_policy(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
 
+	policy = cpufreq_cpu_get(cpu);
 	if (!policy)
 		return;
 
-	/*
-	 * BIOS might change freq behind our back
-	 * -> ask driver for current freq and notify governors about a change
-	 */
-	if (cpufreq_driver->get && has_target() &&
-	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
-		goto unlock;
-
-	refresh_frequency_limits(policy);
-
-unlock:
-	cpufreq_cpu_release(policy);
+	cpufreq_policy_refresh(policy);
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
@@ -2829,7 +2765,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
- * cpufreq_update_policy() for @cpu.
+ * cpufreq_policy_refresh() for @cpu.
 */
 void cpufreq_update_limits(unsigned int cpu)
 {
@@ -2840,9 +2776,9 @@ void cpufreq_update_limits(unsigned int cpu)
 		return;
 
 	if (cpufreq_driver->update_limits)
-		cpufreq_driver->update_limits(cpu);
+		cpufreq_driver->update_limits(policy);
 	else
-		cpufreq_update_policy(cpu);
+		cpufreq_policy_refresh(policy);
 }
 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
 
@@ -2876,8 +2812,10 @@ static int cpufreq_boost_trigger_state(int state)
 	unsigned long flags;
 	int ret = 0;
 
-	if (cpufreq_driver->boost_enabled == state)
-		return 0;
+	/*
+	 * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
+	 * make sure all policies are in sync with global boost flag.
+	 */
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver->boost_enabled = state;
@@ -2888,12 +2826,9 @@ static int cpufreq_boost_trigger_state(int state)
 		if (!policy->boost_supported)
 			continue;
 
-		policy->boost_enabled = state;
-		ret = cpufreq_driver->set_boost(policy, state);
-		if (ret) {
-			policy->boost_enabled = !policy->boost_enabled;
+		ret = policy_set_boost(policy, state);
+		if (ret)
 			goto err_reset_state;
-		}
 	}
 	cpus_read_unlock();
 
@@ -3118,6 +3053,36 @@ static int __init cpufreq_core_init(void)
 
 	return 0;
 }
+
+static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
+{
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+	policy = cpufreq_cpu_get(cpu);
+	if (!policy) {
+		pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
+		return false;
+	}
+
+	return sugov_is_governor(policy);
+}
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask)
+{
+	unsigned int cpu;
+
+	/* Do not attempt EAS if schedutil is not being used. */
+	for_each_cpu(cpu, cpu_mask) {
+		if (!cpufreq_policy_is_good_for_eas(cpu)) {
+			pr_debug("rd %*pbl: schedutil is mandatory for EAS\n",
+				 cpumask_pr_args(cpu_mask));
+			return false;
+		}
+	}
+
+	return true;
+}
+
 module_param(off, int, 0444);
 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
 core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index db8c99535e61..64587d318267 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,7 @@ struct global_params {
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
+ * @pd_registered:	Set when a perf domain is registered for this CPU.
 * @hwp_notify_work:	workqueue for HWP notifications.
 *
 * This structure stores per CPU instance data for all CPUs.
@@ -260,6 +261,9 @@ struct cpudata {
 	unsigned int sched_flags;
 	u32 hwp_boost_min;
 	bool suspended;
+#ifdef CONFIG_ENERGY_MODEL
+	bool pd_registered;
+#endif
 	struct delayed_work hwp_notify_work;
 };
 
@@ -303,6 +307,7 @@ static bool hwp_is_hybrid;
 
 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
+#define INTEL_PSTATE_CORE_SCALING	100000
 #define HYBRID_SCALING_FACTOR_ADL	78741
 #define HYBRID_SCALING_FACTOR_MTL	80000
 #define HYBRID_SCALING_FACTOR_LNL	86957
@@ -311,7 +316,7 @@ static int hybrid_scaling_factor;
 
 static inline int core_get_scaling(void)
 {
-	return 100000;
+	return INTEL_PSTATE_CORE_SCALING;
 }
 
 #ifdef CONFIG_ACPI
@@ -948,12 +953,124 @@ static struct cpudata *hybrid_max_perf_cpu __read_mostly;
 */
 static DEFINE_MUTEX(hybrid_capacity_lock);
 
+#ifdef CONFIG_ENERGY_MODEL
+#define HYBRID_EM_STATE_COUNT	4
+
+static int hybrid_active_power(struct device *dev, unsigned long *power,
+			       unsigned long *freq)
+{
+	/*
+	 * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
+	 * of the maximum capacity such that two CPUs of the same type will be
+	 * regarded as equally attractive if the utilization of each of them
+	 * falls into the same bin, which should prevent tasks from being
+	 * migrated between them too often.
+	 *
+	 * For this purpose, return the "frequency" of 2 for the first
+	 * performance level and otherwise leave the value set by the caller.
+	 */
+	if (!*freq)
+		*freq = 2;
+
+	/* No power information. */
+	*power = EM_MAX_POWER;
+
+	return 0;
+}
+
+static int hybrid_get_cost(struct device *dev, unsigned long freq,
+			   unsigned long *cost)
+{
+	struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
+	struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
+
+	/*
+	 * The smaller the perf-to-frequency scaling factor, the larger the IPC
+	 * ratio between the given CPU and the least capable CPU in the system.
+	 * Regard that IPC ratio as the primary cost component and assume that
+	 * the scaling factors for different CPU types will differ by at least
+	 * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+	 *
+	 * Add the freq value to the cost, so that the cost of running on CPUs
+	 * of the same type in different "utilization bins" is different.
+	 */
+	*cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
+	/*
+	 * Increase the cost slightly for CPUs able to access L3 to avoid
+	 * touching it in case some other CPUs of the same type can do the work
+	 * without it.
+	 */
+	if (cacheinfo) {
+		unsigned int i;
+
+		/* Check if L3 cache is there. */
+		for (i = 0; i < cacheinfo->num_leaves; i++) {
+			if (cacheinfo->info_list[i].level == 3) {
+				*cost += 2;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static bool hybrid_register_perf_domain(unsigned int cpu)
+{
+	static const struct em_data_callback cb
+			= EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+	struct cpudata *cpudata = all_cpu_data[cpu];
+	struct device *cpu_dev;
+
+	/*
+	 * Registering EM perf domains without enabling asymmetric CPU capacity
+	 * support is not really useful and one domain should not be registered
+	 * more than once.
+	 */
+	if (!hybrid_max_perf_cpu || cpudata->pd_registered)
+		return false;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return false;
+
+	if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+					cpumask_of(cpu), false))
+		return false;
+
+	cpudata->pd_registered = true;
+
+	return true;
+}
+
+static void hybrid_register_all_perf_domains(void)
+{
+	unsigned int cpu;
+
+	for_each_online_cpu(cpu)
+		hybrid_register_perf_domain(cpu);
+}
+
+static void hybrid_update_perf_domain(struct cpudata *cpu)
+{
+	if (cpu->pd_registered)
+		em_adjust_cpu_capacity(cpu->cpu);
+}
+#else /* !CONFIG_ENERGY_MODEL */
+static inline bool hybrid_register_perf_domain(unsigned int cpu) { return false; }
+static inline void hybrid_register_all_perf_domains(void) {}
+static inline void hybrid_update_perf_domain(struct cpudata *cpu) {}
+#endif /* CONFIG_ENERGY_MODEL */
+
 static void hybrid_set_cpu_capacity(struct cpudata *cpu)
 {
 	arch_set_cpu_capacity(cpu->cpu, cpu->capacity_perf,
 			      hybrid_max_perf_cpu->capacity_perf,
 			      cpu->capacity_perf,
 			      cpu->pstate.max_pstate_physical);
+	hybrid_update_perf_domain(cpu);
+
+	topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
 
 	pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
 		 cpu->capacity_perf,
 		 hybrid_max_perf_cpu->capacity_perf,
@@ -1042,6 +1159,11 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
 	guard(mutex)(&hybrid_capacity_lock);
 
 	__hybrid_refresh_cpu_capacity_scaling();
+	/*
+	 * Perf domains are not registered before setting hybrid_max_perf_cpu,
+	 * so register them all after setting up CPU capacity scaling.
+	 */
+	hybrid_register_all_perf_domains();
 }
 
 static void hybrid_init_cpu_capacity_scaling(bool refresh)
@@ -1069,7 +1191,7 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
 		hybrid_refresh_cpu_capacity_scaling();
 		/*
 		 * Disabling ITMT causes sched domains to be rebuilt to disable asym
-		 * packing and enable asym capacity.
+		 * packing and enable asym capacity and EAS.
 		 */
 		sched_clear_itmt_support();
 	}
@@ -1147,6 +1269,14 @@ static void hybrid_update_capacity(struct cpudata *cpu)
 	}
 
 	hybrid_set_cpu_capacity(cpu);
+	/*
+	 * If the CPU was offline to start with and it is going online for the
+	 * first time, a perf domain needs to be registered for it if hybrid
+	 * capacity scaling has been enabled already. In that case, sched
+	 * domains need to be rebuilt to take the new perf domain into account.
+	 */
+	if (hybrid_register_perf_domain(cpu->cpu))
+		em_rebuild_sched_domains();
 
 unlock:
 	mutex_unlock(&hybrid_capacity_lock);
@@ -1356,9 +1486,11 @@ static void intel_pstate_update_policies(void)
 		cpufreq_update_policy(cpu);
 }
 
-static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
-					   struct cpufreq_policy *policy)
+static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
+					   struct cpudata *cpudata)
 {
+	guard(cpufreq_policy_write)(policy);
+
 	if (hwp_active)
 		intel_pstate_get_hwp_cap(cpudata);
 
@@ -1368,42 +1500,34 @@ static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
 	refresh_frequency_limits(policy);
 }
 
-static void intel_pstate_update_limits(unsigned int cpu)
+static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-	struct cpudata *cpudata;
+	struct cpufreq_policy *policy __free(put_cpufreq_policy);
 
+	policy = cpufreq_cpu_get(cpudata->cpu);
 	if (!policy)
-		return;
+		return false;
 
-	cpudata = all_cpu_data[cpu];
+	__intel_pstate_update_max_freq(policy, cpudata);
 
-	__intel_pstate_update_max_freq(cpudata, policy);
+	return true;
+}
 
-	/* Prevent the driver from being unregistered now. */
-	mutex_lock(&intel_pstate_driver_lock);
+static void intel_pstate_update_limits(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpudata = all_cpu_data[policy->cpu];
 
-	cpufreq_cpu_release(policy);
+	__intel_pstate_update_max_freq(policy, cpudata);
 
 	hybrid_update_capacity(cpudata);
-
-	mutex_unlock(&intel_pstate_driver_lock);
 }
 
 static void intel_pstate_update_limits_for_all(void)
 {
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
-
-		if (!policy)
-			continue;
-
-		__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
-
-		cpufreq_cpu_release(policy);
-	}
+	for_each_possible_cpu(cpu)
+		intel_pstate_update_max_freq(all_cpu_data[cpu]);
 
 	mutex_lock(&hybrid_capacity_lock);
 
@@ -1843,13 +1967,8 @@ static void intel_pstate_notify_work(struct work_struct *work)
 {
 	struct cpudata *cpudata =
 		container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
-	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);
-
-	if (policy) {
-		__intel_pstate_update_max_freq(cpudata, policy);
-
-		cpufreq_cpu_release(policy);
+	if (intel_pstate_update_max_freq(cpudata)) {
 		/*
 		 * The driver will not be unregistered while this function is
 		 * running, so update the capacity without acquiring the driver
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 4fd43b8d61a9..3c2756a539c4 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -16,7 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
 #include <linux/psci.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -423,14 +423,14 @@ deinit:
 	 * to register cpuidle driver then rollback to cancel all CPUs
 	 * registration.
*/ -static int psci_cpuidle_probe(struct platform_device *pdev) +static int psci_cpuidle_probe(struct faux_device *fdev) { int cpu, ret; struct cpuidle_driver *drv; struct cpuidle_device *dev; for_each_present_cpu(cpu) { - ret = psci_idle_init_cpu(&pdev->dev, cpu); + ret = psci_idle_init_cpu(&fdev->dev, cpu); if (ret) goto out_fail; } @@ -450,26 +450,37 @@ out_fail: return ret; } -static struct platform_driver psci_cpuidle_driver = { +static struct faux_device_ops psci_cpuidle_ops = { .probe = psci_cpuidle_probe, - .driver = { - .name = "psci-cpuidle", - }, }; +static bool __init dt_idle_state_present(void) +{ + struct device_node *cpu_node __free(device_node) = NULL; + struct device_node *state_node __free(device_node) = NULL; + + cpu_node = of_cpu_device_node_get(cpumask_first(cpu_possible_mask)); + if (!cpu_node) + return false; + + state_node = of_get_cpu_state_node(cpu_node, 0); + if (!state_node) + return false; + + return !!of_match_node(psci_idle_state_match, state_node); +} + static int __init psci_idle_init(void) { - struct platform_device *pdev; - int ret; + struct faux_device *fdev; - ret = platform_driver_register(&psci_cpuidle_driver); - if (ret) - return ret; + if (!dt_idle_state_present()) + return 0; - pdev = platform_device_register_simple("psci-cpuidle", -1, NULL, 0); - if (IS_ERR(pdev)) { - platform_driver_unregister(&psci_cpuidle_driver); - return PTR_ERR(pdev); + fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops); + if (!fdev) { + pr_err("Failed to create psci-cpuidle device\n"); + return -ENODEV; } return 0; diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 39aa0aea61c6..52d5d26fc7c6 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -255,7 +255,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, */ data->next_timer_ns = KTIME_MAX; delta_tick = TICK_NSEC / 2; - data->bucket = which_bucket(KTIME_MAX); + data->bucket = BUCKETS - 1; } if (unlikely(drv->state_count <= 1 || latency_req == 0) || diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index 8fe5e1b47ef9..bfa55c1eab5b 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -19,7 +19,7 @@ * * Of course, non-timer wakeup sources are more important in some use cases, * but even then it is generally unnecessary to consider idle duration values - * greater than the time time till the next timer event, referred as the sleep + * greater than the time till the next timer event, referred to as the sleep * length in what follows, because the closest timer will ultimately wake up the * CPU anyway unless it is woken up earlier. * @@ -311,7 +311,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, struct cpuidle_state *s = &drv->states[i]; /* - * Update the sums of idle state mertics for all of the states + * Update the sums of idle state metrics for all of the states * shallower than the current one. 
*/ intercept_sum += prev_bin->intercepts; diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 7b6b754ad6eb..cb7fbf74138e 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -641,7 +641,7 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe) return dev->power.runtime_status == RPM_SUSPENDING || dev->power.runtime_status == RPM_RESUMING || - pm_suspend_target_state != PM_SUSPEND_ON; + pm_suspend_in_progress(); #else return false; #endif diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 433d858b7be1..8ccb483204fa 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -48,9 +48,11 @@ #include <trace/events/power.h> #include <linux/sched.h> #include <linux/sched/smt.h> +#include <linux/mutex.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/moduleparam.h> +#include <linux/sysfs.h> #include <asm/cpuid/api.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> @@ -93,9 +95,15 @@ struct idle_cpu { */ unsigned long auto_demotion_disable_flags; bool disable_promotion_to_c1e; + bool c1_demotion_supported; bool use_acpi; }; +static bool c1_demotion_supported; +static DEFINE_MUTEX(c1_demotion_mutex); + +static struct device *sysfs_root __initdata; + static const struct idle_cpu *icpu __initdata; static struct cpuidle_state *cpuidle_state_table __initdata; @@ -1550,18 +1558,21 @@ static const struct idle_cpu idle_cpu_gmt __initconst = { static const struct idle_cpu idle_cpu_spr __initconst = { .state_table = spr_cstates, .disable_promotion_to_c1e = true, + .c1_demotion_supported = true, .use_acpi = true, }; static const struct idle_cpu idle_cpu_gnr __initconst = { .state_table = gnr_cstates, .disable_promotion_to_c1e = true, + .c1_demotion_supported = true, .use_acpi = true, }; static const struct idle_cpu idle_cpu_gnrd __initconst = { .state_table = gnrd_cstates, .disable_promotion_to_c1e = true, + .c1_demotion_supported = true, .use_acpi = true, }; @@ -1600,12 +1611,14 @@ static const struct idle_cpu idle_cpu_snr __initconst = { static const struct idle_cpu idle_cpu_grr __initconst = { .state_table = grr_cstates, .disable_promotion_to_c1e = true, + .c1_demotion_supported = true, .use_acpi = true, }; static const struct idle_cpu idle_cpu_srf __initconst = { .state_table = srf_cstates, .disable_promotion_to_c1e = true, + .c1_demotion_supported = true, .use_acpi = true, }; @@ -2325,6 +2338,88 @@ static void __init intel_idle_cpuidle_devices_uninit(void) cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i)); } +static void intel_c1_demotion_toggle(void *enable) +{ + unsigned long long msr_val; + + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val); + /* + * Enable/disable C1 undemotion along with C1 demotion, as this is the + * most sensible configuration in general. 
+ */ + if (enable) + msr_val |= NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE; + else + msr_val &= ~(NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE); + wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val); +} + +static ssize_t intel_c1_demotion_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool enable; + int err; + + err = kstrtobool(buf, &enable); + if (err) + return err; + + mutex_lock(&c1_demotion_mutex); + /* Enable/disable C1 demotion on all CPUs */ + on_each_cpu(intel_c1_demotion_toggle, (void *)enable, 1); + mutex_unlock(&c1_demotion_mutex); + + return count; +} + +static ssize_t intel_c1_demotion_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long long msr_val; + + /* + * Read the MSR value for a CPU and assume it is the same for all CPUs. Any other + * configuration would be a BIOS bug. + */ + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val); + return sysfs_emit(buf, "%d\n", !!(msr_val & NHM_C1_AUTO_DEMOTE)); +} +static DEVICE_ATTR_RW(intel_c1_demotion); + +static int __init intel_idle_sysfs_init(void) +{ + int err; + + if (!c1_demotion_supported) + return 0; + + sysfs_root = bus_get_dev_root(&cpu_subsys); + if (!sysfs_root) + return 0; + + err = sysfs_add_file_to_group(&sysfs_root->kobj, + &dev_attr_intel_c1_demotion.attr, + "cpuidle"); + if (err) { + put_device(sysfs_root); + return err; + } + + return 0; +} + +static void __init intel_idle_sysfs_uninit(void) +{ + if (!sysfs_root) + return; + + sysfs_remove_file_from_group(&sysfs_root->kobj, + &dev_attr_intel_c1_demotion.attr, + "cpuidle"); + put_device(sysfs_root); +} + static int __init intel_idle_init(void) { const struct x86_cpu_id *id; @@ -2375,6 +2470,8 @@ static int __init intel_idle_init(void) auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; if (icpu->disable_promotion_to_c1e) c1e_promotion = C1E_PROMOTION_DISABLE; + if (icpu->c1_demotion_supported) + c1_demotion_supported = true; if (icpu->use_acpi || force_use_acpi) intel_idle_acpi_cst_extract(); } else if (!intel_idle_acpi_cst_extract()) { @@ -2388,6 +2485,10 @@ static int __init intel_idle_init(void) if (!intel_idle_cpuidle_devices) return -ENOMEM; + retval = intel_idle_sysfs_init(); + if (retval) + pr_warn("failed to initialize sysfs\n"); + intel_idle_cpuidle_driver_init(&intel_idle_driver); retval = cpuidle_register_driver(&intel_idle_driver); @@ -2412,6 +2513,7 @@ hp_setup_fail: intel_idle_cpuidle_devices_uninit(); cpuidle_unregister_driver(&intel_idle_driver); init_driver_fail: + intel_idle_sysfs_uninit(); free_percpu(intel_idle_cpuidle_devices); return retval; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 73e9a3b2f29b..edbd60501cf0 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -40,17 +40,14 @@ static DEFINE_XARRAY_ALLOC1(opp_configs); static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table) { struct opp_device *opp_dev; - bool found = false; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + list_for_each_entry(opp_dev, &opp_table->dev_list, node) - if (opp_dev->dev == dev) { - found = true; - break; - } + if (opp_dev->dev == dev) + return true; - mutex_unlock(&opp_table->lock); - return found; + return false; } static struct opp_table *_find_opp_table_unlocked(struct device *dev) { struct opp_table *opp_table; list_for_each_entry(opp_table, &opp_tables, node) { - if (_find_opp_dev(dev, opp_table)) { - 
_get_opp_table_kref(opp_table); - return opp_table; - } + if (_find_opp_dev(dev, opp_table)) + return dev_pm_opp_get_opp_table_ref(opp_table); } return ERR_PTR(-ENODEV); @@ -80,18 +75,13 @@ static struct opp_table *_find_opp_table_unlocked(struct device *dev) */ struct opp_table *_find_opp_table(struct device *dev) { - struct opp_table *opp_table; - if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } - mutex_lock(&opp_table_lock); - opp_table = _find_opp_table_unlocked(dev); - mutex_unlock(&opp_table_lock); - - return opp_table; + guard(mutex)(&opp_table_lock); + return _find_opp_table_unlocked(dev); } /* @@ -319,18 +309,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); */ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) { - struct opp_table *opp_table; - unsigned long clock_latency_ns; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return 0; - clock_latency_ns = opp_table->clock_latency_ns_max; - - dev_pm_opp_put_opp_table(opp_table); - - return clock_latency_ns; + return opp_table->clock_latency_ns_max; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); @@ -342,7 +327,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); */ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp; struct regulator *reg; unsigned long latency_ns = 0; @@ -358,33 +343,31 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) /* Regulator may not be required for the device */ if (!opp_table->regulators) - goto put_opp_table; + return 0; count = opp_table->regulator_count; uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); if (!uV) - goto put_opp_table; - - mutex_lock(&opp_table->lock); + return 0; - for (i = 0; i < count; i++) { - uV[i].min = ~0; - uV[i].max = 0; + scoped_guard(mutex, &opp_table->lock) { + for (i = 0; i < count; i++) { + uV[i].min = ~0; + uV[i].max = 0; - list_for_each_entry(opp, &opp_table->opp_list, node) { - if (!opp->available) - continue; + list_for_each_entry(opp, &opp_table->opp_list, node) { + if (!opp->available) + continue; - if (opp->supplies[i].u_volt_min < uV[i].min) - uV[i].min = opp->supplies[i].u_volt_min; - if (opp->supplies[i].u_volt_max > uV[i].max) - uV[i].max = opp->supplies[i].u_volt_max; + if (opp->supplies[i].u_volt_min < uV[i].min) + uV[i].min = opp->supplies[i].u_volt_min; + if (opp->supplies[i].u_volt_max > uV[i].max) + uV[i].max = opp->supplies[i].u_volt_max; + } } } - mutex_unlock(&opp_table->lock); - /* * The caller needs to ensure that opp_table (and hence the regulator) * isn't freed, while we are executing this routine. 
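Many of the OPP hunks in this file replace explicit mutex_lock()/mutex_unlock() pairs with guard() and scoped_guard() from <linux/cleanup.h>. Here is a minimal sketch of the two forms, using a hypothetical demo_lock and demo_count that are not part of this patch:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

/* guard() holds demo_lock until the enclosing scope ends. */
static int demo_get_count(void)
{
	guard(mutex)(&demo_lock);
	return demo_count;	/* unlocked automatically on return */
}

/*
 * scoped_guard() bounds the critical section to the braces; an early
 * return from inside the scope still releases the lock first.
 */
static int demo_inc(int limit)
{
	scoped_guard(mutex, &demo_lock) {
		if (demo_count >= limit)
			return -EBUSY;
		demo_count++;
	}
	return 0;
}

This is why conversions such as _find_opp_dev() above and _opp_set_availability() further down can return directly from inside the locked region instead of jumping to an unlock label.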
@@ -397,8 +380,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) } kfree(uV); -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); return latency_ns; } @@ -428,7 +409,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); */ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); unsigned long freq = 0; opp_table = _find_opp_table(dev); @@ -438,8 +419,6 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) if (opp_table->suspend_opp && opp_table->suspend_opp->available) freq = dev_pm_opp_get_freq(opp_table->suspend_opp); - dev_pm_opp_put_opp_table(opp_table); - return freq; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); @@ -449,15 +428,13 @@ int _get_opp_count(struct opp_table *opp_table) struct dev_pm_opp *opp; int count = 0; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { if (opp->available) count++; } - mutex_unlock(&opp_table->lock); - return count; } @@ -470,21 +447,16 @@ int _get_opp_count(struct opp_table *opp_table) */ int dev_pm_opp_get_opp_count(struct device *dev) { - struct opp_table *opp_table; - int count; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - count = PTR_ERR(opp_table); - dev_dbg(dev, "%s: OPP table not found (%d)\n", - __func__, count); - return count; + dev_dbg(dev, "%s: OPP table not found (%ld)\n", + __func__, PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - count = _get_opp_count(opp_table); - dev_pm_opp_put_opp_table(opp_table); - - return count; + return _get_opp_count(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); @@ -551,7 +523,7 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, if (assert && !assert(opp_table, index)) return ERR_PTR(-EINVAL); - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(temp_opp, &opp_table->opp_list, node) { if (temp_opp->available == available) { @@ -566,8 +538,6 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); - return opp; } @@ -578,8 +548,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long opp_key, unsigned long key), bool (*assert)(struct opp_table *opp_table, unsigned int index)) { - struct opp_table *opp_table; - struct dev_pm_opp *opp; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -588,12 +557,8 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, return ERR_CAST(opp_table); } - opp = _opp_table_find_key(opp_table, key, index, available, read, - compare, assert); - - dev_pm_opp_put_opp_table(opp_table); - - return opp; + return _opp_table_find_key(opp_table, key, index, available, read, + compare, assert); } static struct dev_pm_opp *_find_key_exact(struct device *dev, @@ -1187,10 +1152,9 @@ static void _find_current_opp(struct device *dev, struct opp_table *opp_table) * make special checks to validate current_opp. 
*/ if (IS_ERR(opp)) { - mutex_lock(&opp_table->lock); - opp = list_first_entry(&opp_table->opp_list, struct dev_pm_opp, node); - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + guard(mutex)(&opp_table->lock); + opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list, + struct dev_pm_opp, node)); } opp_table->current_opp = opp; @@ -1329,8 +1293,7 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, dev_pm_opp_put(old_opp); /* Make sure current_opp doesn't get freed */ - dev_pm_opp_get(opp); - opp_table->current_opp = opp; + opp_table->current_opp = dev_pm_opp_get(opp); return ret; } @@ -1348,11 +1311,10 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table, */ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); + struct dev_pm_opp *opp __free(put_opp) = NULL; unsigned long freq = 0, temp_freq; - struct dev_pm_opp *opp = NULL; bool forced = false; - int ret; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1369,9 +1331,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) * equivalent to a clk_set_rate() */ if (!_get_opp_count(opp_table)) { - ret = opp_table->config_clks(dev, opp_table, NULL, - &target_freq, false); - goto put_opp_table; + return opp_table->config_clks(dev, opp_table, NULL, + &target_freq, false); } freq = clk_round_rate(opp_table->clk, target_freq); @@ -1386,10 +1347,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) temp_freq = freq; opp = _find_freq_ceil(opp_table, &temp_freq); if (IS_ERR(opp)) { - ret = PTR_ERR(opp); - dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", - __func__, freq, ret); - goto put_opp_table; + dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n", + __func__, freq, PTR_ERR(opp)); + return PTR_ERR(opp); } /* @@ -1402,14 +1362,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) forced = opp_table->current_rate_single_clk != freq; } - ret = _set_opp(dev, opp_table, opp, &freq, forced); - - if (freq) - dev_pm_opp_put(opp); - -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - return ret; + return _set_opp(dev, opp_table, opp, &freq, forced); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); @@ -1425,8 +1378,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); */ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -1434,10 +1386,7 @@ int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp) return PTR_ERR(opp_table); } - ret = _set_opp(dev, opp_table, opp, NULL, false); - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return _set_opp(dev, opp_table, opp, NULL, false); } EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp); @@ -1462,9 +1411,8 @@ struct opp_device *_add_opp_dev(const struct device *dev, /* Initialize opp-dev */ opp_dev->dev = dev; - mutex_lock(&opp_table->lock); - list_add(&opp_dev->node, &opp_table->dev_list); - mutex_unlock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) + list_add(&opp_dev->node, &opp_table->dev_list); /* Create debugfs entries for the opp_table */ opp_debug_register(opp_dev, opp_table); @@ -1688,14 +1636,10 @@ static void _opp_table_kref_release(struct kref *kref) kfree(opp_table); } -void _get_opp_table_kref(struct opp_table *opp_table) +struct opp_table *dev_pm_opp_get_opp_table_ref(struct 
opp_table *opp_table) { kref_get(&opp_table->kref); -} - -void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) -{ - _get_opp_table_kref(opp_table); + return opp_table; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref); @@ -1729,9 +1673,10 @@ static void _opp_kref_release(struct kref *kref) kfree(opp); } -void dev_pm_opp_get(struct dev_pm_opp *opp) +struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp) { kref_get(&opp->kref); + return opp; } EXPORT_SYMBOL_GPL(dev_pm_opp_get); @@ -1750,27 +1695,25 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put); */ void dev_pm_opp_remove(struct device *dev, unsigned long freq) { + struct opp_table *opp_table __free(put_opp_table); struct dev_pm_opp *opp = NULL, *iter; - struct opp_table *opp_table; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; if (!assert_single_clk(opp_table, 0)) - goto put_table; - - mutex_lock(&opp_table->lock); + return; - list_for_each_entry(iter, &opp_table->opp_list, node) { - if (iter->rates[0] == freq) { - opp = iter; - break; + scoped_guard(mutex, &opp_table->lock) { + list_for_each_entry(iter, &opp_table->opp_list, node) { + if (iter->rates[0] == freq) { + opp = iter; + break; + } } } - mutex_unlock(&opp_table->lock); - if (opp) { dev_pm_opp_put(opp); @@ -1780,32 +1723,26 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", __func__, freq); } - -put_table: - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove); static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table, bool dynamic) { - struct dev_pm_opp *opp = NULL, *temp; + struct dev_pm_opp *opp; + + guard(mutex)(&opp_table->lock); - mutex_lock(&opp_table->lock); - list_for_each_entry(temp, &opp_table->opp_list, node) { + list_for_each_entry(opp, &opp_table->opp_list, node) { /* * Refcount must be dropped only once for each OPP by OPP core, * do that with help of "removed" flag. 
*/ - if (!temp->removed && dynamic == temp->dynamic) { - opp = temp; - break; - } + if (!opp->removed && dynamic == opp->dynamic) + return opp; } - mutex_unlock(&opp_table->lock); - return opp; + return NULL; } /* @@ -1829,20 +1766,14 @@ static void _opp_remove_all(struct opp_table *opp_table, bool dynamic) bool _opp_remove_all_static(struct opp_table *opp_table) { - mutex_lock(&opp_table->lock); - - if (!opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return false; - } + scoped_guard(mutex, &opp_table->lock) { + if (!opp_table->parsed_static_opps) + return false; - if (--opp_table->parsed_static_opps) { - mutex_unlock(&opp_table->lock); - return true; + if (--opp_table->parsed_static_opps) + return true; } - mutex_unlock(&opp_table->lock); - _opp_remove_all(opp_table, false); return true; } @@ -1855,16 +1786,13 @@ bool _opp_remove_all_static(struct opp_table *opp_table) */ void dev_pm_opp_remove_all_dynamic(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return; _opp_remove_all(opp_table, true); - - /* Drop the reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic); @@ -2049,17 +1977,15 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct list_head *head; int ret; - mutex_lock(&opp_table->lock); - head = &opp_table->opp_list; + scoped_guard(mutex, &opp_table->lock) { + head = &opp_table->opp_list; - ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); - if (ret) { - mutex_unlock(&opp_table->lock); - return ret; - } + ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); + if (ret) + return ret; - list_add(&new_opp->node, head); - mutex_unlock(&opp_table->lock); + list_add(&new_opp->node, head); + } new_opp->opp_table = opp_table; kref_init(&new_opp->kref); @@ -2161,8 +2087,8 @@ static int _opp_set_supported_hw(struct opp_table *opp_table, if (opp_table->supported_hw) return 0; - opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), - GFP_KERNEL); + opp_table->supported_hw = kmemdup_array(versions, count, + sizeof(*versions), GFP_KERNEL); if (!opp_table->supported_hw) return -ENOMEM; @@ -2706,18 +2632,16 @@ struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, return ERR_PTR(-EBUSY); for (i = 0; i < src_table->required_opp_count; i++) { - if (src_table->required_opp_tables[i] == dst_table) { - mutex_lock(&src_table->lock); + if (src_table->required_opp_tables[i] != dst_table) + continue; + scoped_guard(mutex, &src_table->lock) { list_for_each_entry(opp, &src_table->opp_list, node) { if (opp == src_opp) { - dest_opp = opp->required_opps[i]; - dev_pm_opp_get(dest_opp); + dest_opp = dev_pm_opp_get(opp->required_opps[i]); break; } } - - mutex_unlock(&src_table->lock); break; } } @@ -2749,7 +2673,6 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, unsigned int pstate) { struct dev_pm_opp *opp; - int dest_pstate = -EINVAL; int i; /* @@ -2783,22 +2706,17 @@ int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, return -EINVAL; } - mutex_lock(&src_table->lock); + guard(mutex)(&src_table->lock); list_for_each_entry(opp, &src_table->opp_list, node) { - if (opp->level == pstate) { - dest_pstate = opp->required_opps[i]->level; - goto unlock; - } + if (opp->level == pstate) + return opp->required_opps[i]->level; } pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table, dst_table); 
-unlock: - mutex_unlock(&src_table->lock); - - return dest_pstate; + return -EINVAL; } /** @@ -2853,46 +2771,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic); static int _opp_set_availability(struct device *dev, unsigned long freq, bool availability_req) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); /* Find the opp_table */ opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { - r = PTR_ERR(opp_table); - dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); - return r; + dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__, + PTR_ERR(opp_table)); + return PTR_ERR(opp_table); } - if (!assert_single_clk(opp_table, 0)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + /* Is update really needed? */ + if (opp->available == availability_req) + return 0; - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto unlock; + opp->available = availability_req; + break; + } + } } - /* Is update really needed? */ - if (opp->available == availability_req) - goto unlock; - - opp->available = availability_req; - - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the change of the OPP availability */ if (availability_req) @@ -2902,14 +2812,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_DISABLE, opp); - dev_pm_opp_put(opp); - goto put_table; - -unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } /** @@ -2929,9 +2832,9 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, unsigned long u_volt_max) { - struct opp_table *opp_table; - struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); - int r = 0; + struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp; + struct opp_table *opp_table __free(put_opp_table); + int r; /* Find the opp_table */ opp_table = _find_opp_table(dev); @@ -2941,49 +2844,36 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, return r; } - if (!assert_single_clk(opp_table, 0)) { - r = -EINVAL; - goto put_table; - } + if (!assert_single_clk(opp_table, 0)) + return -EINVAL; - mutex_lock(&opp_table->lock); + scoped_guard(mutex, &opp_table->lock) { + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rates[0] == freq) { + opp = dev_pm_opp_get(tmp_opp); - /* Do we have the frequency? */ - list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { - if (tmp_opp->rates[0] == freq) { - opp = tmp_opp; - break; - } - } + /* Is update really needed? */ + if (opp->supplies->u_volt == u_volt) + return 0; - if (IS_ERR(opp)) { - r = PTR_ERR(opp); - goto adjust_unlock; - } - - /* Is update really needed? 
*/ - if (opp->supplies->u_volt == u_volt) - goto adjust_unlock; + opp->supplies->u_volt = u_volt; + opp->supplies->u_volt_min = u_volt_min; + opp->supplies->u_volt_max = u_volt_max; - opp->supplies->u_volt = u_volt; - opp->supplies->u_volt_min = u_volt_min; - opp->supplies->u_volt_max = u_volt_max; + break; + } + } + } - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); + if (IS_ERR(opp)) + return PTR_ERR(opp); /* Notify the voltage change of the OPP */ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE, opp); - dev_pm_opp_put(opp); - goto put_table; - -adjust_unlock: - mutex_unlock(&opp_table->lock); -put_table: - dev_pm_opp_put_opp_table(opp_table); - return r; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); @@ -2997,9 +2887,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage); */ int dev_pm_opp_sync_regulators(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); struct regulator *reg; - int i, ret = 0; + int ret, i; /* Device may not have OPP table */ opp_table = _find_opp_table(dev); @@ -3008,23 +2898,20 @@ int dev_pm_opp_sync_regulators(struct device *dev) /* Regulator may not be required for the device */ if (unlikely(!opp_table->regulators)) - goto put_table; + return 0; /* Nothing to sync if voltage wasn't changed */ if (!opp_table->enabled) - goto put_table; + return 0; for (i = 0; i < opp_table->regulator_count; i++) { reg = opp_table->regulators[i]; ret = regulator_sync_voltage(reg); if (ret) - break; + return ret; } -put_table: - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators); @@ -3076,18 +2963,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable); */ int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_register(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_register(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_register_notifier); @@ -3101,18 +2983,13 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb) { - struct opp_table *opp_table; - int ret; + struct opp_table *opp_table __free(put_opp_table); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - ret = blocking_notifier_chain_unregister(&opp_table->head, nb); - - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return blocking_notifier_chain_unregister(&opp_table->head, nb); } EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); @@ -3125,7 +3002,7 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); */ void dev_pm_opp_remove_table(struct device *dev) { - struct opp_table *opp_table; + struct opp_table *opp_table __free(put_opp_table); /* Check for existing table for 'dev' */ opp_table = _find_opp_table(dev); @@ -3146,8 +3023,5 @@ void dev_pm_opp_remove_table(struct device *dev) **/ if (_opp_remove_all_static(opp_table)) dev_pm_opp_put_opp_table(opp_table); - - /* Drop reference taken by _find_opp_table() */ - dev_pm_opp_put_opp_table(opp_table); } EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 12c429b407ca..97989d4fe336 100644 --- a/drivers/opp/cpu.c +++ 
b/drivers/opp/cpu.c @@ -43,7 +43,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **opp_table) { - struct dev_pm_opp *opp; struct cpufreq_frequency_table *freq_table = NULL; int i, max_opps, ret = 0; unsigned long rate; @@ -57,6 +56,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, return -ENOMEM; for (i = 0, rate = 0; i < max_opps; i++, rate++) { + struct dev_pm_opp *opp __free(put_opp); + /* find next rate */ opp = dev_pm_opp_find_freq_ceil(dev, &rate); if (IS_ERR(opp)) { @@ -69,8 +70,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, /* Is Boost/turbo opp ? */ if (dev_pm_opp_is_turbo(opp)) freq_table[i].flags = CPUFREQ_BOOST_FREQ; - - dev_pm_opp_put(opp); } freq_table[i].driver_data = i; @@ -155,10 +154,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; struct device *dev; - int cpu, ret = 0; + int cpu; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) @@ -186,9 +185,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; } - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); @@ -204,33 +201,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); */ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { + struct opp_table *opp_table __free(put_opp_table); struct opp_device *opp_dev; - struct opp_table *opp_table; - int ret = 0; opp_table = _find_opp_table(cpu_dev); if (IS_ERR(opp_table)) return PTR_ERR(opp_table); - if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { - ret = -EINVAL; - goto put_opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) + return -EINVAL; cpumask_clear(cpumask); if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp_dev, &opp_table->dev_list, node) cpumask_set_cpu(opp_dev->dev->id, cpumask); - mutex_unlock(&opp_table->lock); } else { cpumask_set_cpu(cpu_dev->id, cpumask); } -put_opp_table: - dev_pm_opp_put_opp_table(opp_table); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index a24f76f5fd01..505d79821584 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); struct opp_table *_managed_opp(struct device *dev, int index) { struct opp_table *opp_table, *managed_table = NULL; - struct device_node *np; + struct device_node *np __free(device_node); np = _opp_of_get_opp_desc_node(dev->of_node, index); if (!np) @@ -60,17 +60,13 @@ struct opp_table *_managed_opp(struct device *dev, int index) * But the OPPs will be considered as shared only if the * OPP table contains a "opp-shared" property. 
*/ - if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { - _get_opp_table_kref(opp_table); - managed_table = opp_table; - } + if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) + managed_table = dev_pm_opp_get_opp_table_ref(opp_table); break; } } - of_node_put(np); - return managed_table; } @@ -80,18 +76,13 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table, { struct dev_pm_opp *opp; - mutex_lock(&opp_table->lock); + guard(mutex)(&opp_table->lock); list_for_each_entry(opp, &opp_table->opp_list, node) { - if (opp->np == opp_np) { - dev_pm_opp_get(opp); - mutex_unlock(&opp_table->lock); - return opp; - } + if (opp->np == opp_np) + return dev_pm_opp_get(opp); } - mutex_unlock(&opp_table->lock); - return NULL; } @@ -104,27 +95,20 @@ static struct device_node *of_parse_required_opp(struct device_node *np, /* The caller must call dev_pm_opp_put_opp_table() after the table is used */ static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np) { + struct device_node *opp_table_np __free(device_node); struct opp_table *opp_table; - struct device_node *opp_table_np; opp_table_np = of_get_parent(opp_np); if (!opp_table_np) - goto err; + return ERR_PTR(-ENODEV); - /* It is safe to put the node now as all we need now is its address */ - of_node_put(opp_table_np); + guard(mutex)(&opp_table_lock); - mutex_lock(&opp_table_lock); list_for_each_entry(opp_table, &opp_tables, node) { - if (opp_table_np == opp_table->np) { - _get_opp_table_kref(opp_table); - mutex_unlock(&opp_table_lock); - return opp_table; - } + if (opp_table_np == opp_table->np) + return dev_pm_opp_get_opp_table_ref(opp_table); } - mutex_unlock(&opp_table_lock); -err: return ERR_PTR(-ENODEV); } @@ -149,9 +133,8 @@ static void _opp_table_free_required_tables(struct opp_table *opp_table) opp_table->required_opp_count = 0; opp_table->required_opp_tables = NULL; - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_del(&opp_table->lazy); - mutex_unlock(&opp_table_lock); } /* @@ -163,7 +146,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, struct device_node *opp_np) { struct opp_table **required_opp_tables; - struct device_node *required_np, *np; + struct device_node *np __free(device_node); bool lazy = false; int count, i, size; @@ -171,30 +154,32 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, np = of_get_next_available_child(opp_np, NULL); if (!np) { dev_warn(dev, "Empty OPP table\n"); - return; } count = of_count_phandle_with_args(np, "required-opps", NULL); if (count <= 0) - goto put_np; + return; size = sizeof(*required_opp_tables) + sizeof(*opp_table->required_devs); required_opp_tables = kcalloc(count, size, GFP_KERNEL); if (!required_opp_tables) - goto put_np; + return; opp_table->required_opp_tables = required_opp_tables; opp_table->required_devs = (void *)(required_opp_tables + count); opp_table->required_opp_count = count; for (i = 0; i < count; i++) { + struct device_node *required_np __free(device_node); + required_np = of_parse_required_opp(np, i); - if (!required_np) - goto free_required_tables; + if (!required_np) { + _opp_table_free_required_tables(opp_table); + return; + } required_opp_tables[i] = _find_table_of_opp_np(required_np); - of_node_put(required_np); if (IS_ERR(required_opp_tables[i])) lazy = true; @@ -206,23 +191,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, * The OPP table is not held while allocating the table, take it * now to avoid corruption to the 
lazy_opp_tables list. */ - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_add(&opp_table->lazy, &lazy_opp_tables); - mutex_unlock(&opp_table_lock); } - - goto put_np; - -free_required_tables: - _opp_table_free_required_tables(opp_table); -put_np: - of_node_put(np); } void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) { - struct device_node *np, *opp_np; + struct device_node *np __free(device_node), *opp_np; u32 val; /* @@ -243,8 +220,6 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, /* Get OPP table node */ opp_np = _opp_of_get_opp_desc_node(np, index); - of_node_put(np); - if (!opp_np) return; @@ -298,15 +273,13 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp) static int _link_required_opps(struct dev_pm_opp *opp, struct opp_table *required_table, int index) { - struct device_node *np; + struct device_node *np __free(device_node); np = of_parse_required_opp(opp->np, index); if (unlikely(!np)) return -ENODEV; opp->required_opps[index] = _find_opp_of_np(required_table, np); - of_node_put(np); - if (!opp->required_opps[index]) { pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", __func__, opp->np, index); @@ -370,19 +343,22 @@ static int lazy_link_required_opps(struct opp_table *opp_table, static void lazy_link_required_opp_table(struct opp_table *new_table) { struct opp_table *opp_table, *temp, **required_opp_tables; - struct device_node *required_np, *opp_np, *required_table_np; struct dev_pm_opp *opp; int i, ret; - mutex_lock(&opp_table_lock); + guard(mutex)(&opp_table_lock); list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { + struct device_node *opp_np __free(device_node); bool lazy = false; /* opp_np can't be invalid here */ opp_np = of_get_next_available_child(opp_table->np, NULL); for (i = 0; i < opp_table->required_opp_count; i++) { + struct device_node *required_np __free(device_node) = NULL; + struct device_node *required_table_np __free(device_node) = NULL; + required_opp_tables = opp_table->required_opp_tables; /* Required opp-table is already parsed */ @@ -393,9 +369,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) required_np = of_parse_required_opp(opp_np, i); required_table_np = of_get_parent(required_np); - of_node_put(required_table_np); - of_node_put(required_np); - /* * Newly added table isn't the required opp-table for * opp_table. 
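The of.c conversions in this section rely on the device_node cleanup class declared in <linux/of.h>, where a pointer marked __free(device_node) has of_node_put() called on it when it leaves scope. A minimal sketch under that assumption; demo_count_required_opps() is a hypothetical helper, not part of this patch:

#include <linux/of.h>

static int demo_count_required_opps(struct device_node *parent)
{
	/* Reference dropped automatically on every return path below. */
	struct device_node *np __free(device_node) =
			of_get_next_available_child(parent, NULL);

	if (!np)
		return -ENODEV;

	return of_count_phandle_with_args(np, "required-opps", NULL);
}

One caveat visible in the lazy_link_required_opp_table() hunk above: pointers declared with __free(device_node) inside a loop body must be initialized (there, to NULL) on every iteration, because the cleanup runs at the end of each iteration's scope.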
@@ -405,8 +378,7 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) continue; } - required_opp_tables[i] = new_table; - _get_opp_table_kref(new_table); + required_opp_tables[i] = dev_pm_opp_get_opp_table_ref(new_table); /* Link OPPs now */ ret = lazy_link_required_opps(opp_table, new_table, i); @@ -417,8 +389,6 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) } } - of_node_put(opp_np); - /* All required opp-tables found, remove from lazy list */ if (!lazy) { list_del_init(&opp_table->lazy); @@ -427,22 +397,22 @@ static void lazy_link_required_opp_table(struct opp_table *new_table) _required_opps_available(opp, opp_table->required_opp_count); } } - - mutex_unlock(&opp_table_lock); } static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) { - struct device_node *np, *opp_np; + struct device_node *opp_np __free(device_node) = NULL; + struct device_node *np __free(device_node) = NULL; struct property *prop; if (!opp_table) { + struct device_node *np __free(device_node); + np = of_node_get(dev->of_node); if (!np) return -ENODEV; opp_np = _opp_of_get_opp_desc_node(np, 0); - of_node_put(np); } else { opp_np = of_node_get(opp_table->np); } @@ -453,15 +423,12 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) /* Checking only first OPP is sufficient */ np = of_get_next_available_child(opp_np, NULL); - of_node_put(opp_np); if (!np) { dev_err(dev, "OPP table empty\n"); return -EINVAL; } prop = of_find_property(np, "opp-peak-kBps", NULL); - of_node_put(np); - if (!prop || !prop->length) return 0; @@ -471,7 +438,7 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) { - struct device_node *np; + struct device_node *np __free(device_node) = of_node_get(dev->of_node); int ret, i, count, num_paths; struct icc_path **paths; @@ -481,15 +448,13 @@ int dev_pm_opp_of_find_icc_paths(struct device *dev, else if (ret <= 0) return ret; - ret = 0; - - np = of_node_get(dev->of_node); if (!np) return 0; + ret = 0; + count = of_count_phandle_with_args(np, "interconnects", "#interconnect-cells"); - of_node_put(np); if (count < 0) return 0; @@ -992,15 +957,14 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table) struct dev_pm_opp *opp; /* OPP table is already initialized for the device */ - mutex_lock(&opp_table->lock); - if (opp_table->parsed_static_opps) { - opp_table->parsed_static_opps++; - mutex_unlock(&opp_table->lock); - return 0; - } + scoped_guard(mutex, &opp_table->lock) { + if (opp_table->parsed_static_opps) { + opp_table->parsed_static_opps++; + return 0; + } - opp_table->parsed_static_opps = 1; - mutex_unlock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + } /* We have opp-table node now, iterate over it and add OPPs */ for_each_available_child_of_node(opp_table->np, np) { @@ -1040,15 +1004,14 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) const __be32 *val; int nr, ret = 0; - mutex_lock(&opp_table->lock); - if (opp_table->parsed_static_opps) { - opp_table->parsed_static_opps++; - mutex_unlock(&opp_table->lock); - return 0; - } + scoped_guard(mutex, &opp_table->lock) { + if (opp_table->parsed_static_opps) { + opp_table->parsed_static_opps++; + return 0; + } - opp_table->parsed_static_opps = 1; - mutex_unlock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + } prop = of_find_property(dev->of_node, 
"operating-points", NULL); if (!prop) { @@ -1306,8 +1269,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - struct device_node *np, *tmp_np, *cpu_np; - int cpu, ret = 0; + struct device_node *np __free(device_node); + int cpu; /* Get OPP descriptor node */ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); @@ -1320,9 +1283,12 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, /* OPPs are shared ? */ if (!of_property_read_bool(np, "opp-shared")) - goto put_cpu_node; + return 0; for_each_possible_cpu(cpu) { + struct device_node *cpu_np __free(device_node) = NULL; + struct device_node *tmp_np __free(device_node) = NULL; + if (cpu == cpu_dev->id) continue; @@ -1330,29 +1296,22 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, if (!cpu_np) { dev_err(cpu_dev, "%s: failed to get cpu%d node\n", __func__, cpu); - ret = -ENOENT; - goto put_cpu_node; + return -ENOENT; } /* Get OPP descriptor node */ tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0); - of_node_put(cpu_np); if (!tmp_np) { pr_err("%pOF: Couldn't find opp node\n", cpu_np); - ret = -ENOENT; - goto put_cpu_node; + return -ENOENT; } /* CPUs are sharing opp node */ if (np == tmp_np) cpumask_set_cpu(cpu, cpumask); - - of_node_put(tmp_np); } -put_cpu_node: - of_node_put(np); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); @@ -1369,9 +1328,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); */ int of_get_required_opp_performance_state(struct device_node *np, int index) { - struct dev_pm_opp *opp; - struct device_node *required_np; - struct opp_table *opp_table; + struct device_node *required_np __free(device_node); + struct opp_table *opp_table __free(put_opp_table) = NULL; + struct dev_pm_opp *opp __free(put_opp) = NULL; int pstate = -EINVAL; required_np = of_parse_required_opp(np, index); @@ -1382,13 +1341,13 @@ int of_get_required_opp_performance_state(struct device_node *np, int index) if (IS_ERR(opp_table)) { pr_err("%s: Failed to find required OPP table %pOF: %ld\n", __func__, np, PTR_ERR(opp_table)); - goto put_required_np; + return PTR_ERR(opp_table); } /* The OPP tables must belong to a genpd */ if (unlikely(!opp_table->is_genpd)) { pr_err("%s: Performance state is only valid for genpds.\n", __func__); - goto put_required_np; + return -EINVAL; } opp = _find_opp_of_np(opp_table, required_np); @@ -1399,15 +1358,8 @@ int of_get_required_opp_performance_state(struct device_node *np, int index) } else { pstate = opp->level; } - dev_pm_opp_put(opp); - } - dev_pm_opp_put_opp_table(opp_table); - -put_required_np: - of_node_put(required_np); - return pstate; } EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); @@ -1424,7 +1376,7 @@ EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state); */ bool dev_pm_opp_of_has_required_opp(struct device *dev) { - struct device_node *opp_np, *np; + struct device_node *np __free(device_node) = NULL, *opp_np __free(device_node); int count; opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0); @@ -1432,14 +1384,12 @@ bool dev_pm_opp_of_has_required_opp(struct device *dev) return false; np = of_get_next_available_child(opp_np, NULL); - of_node_put(opp_np); if (!np) { dev_warn(dev, "Empty OPP table\n"); return false; } count = of_count_phandle_with_args(np, "required-opps", NULL); - of_node_put(np); return count > 0; } @@ -1475,7 +1425,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); static int __maybe_unused _get_dt_power(struct device *dev, unsigned long *uW, 
unsigned long *kHz) { - struct dev_pm_opp *opp; + struct dev_pm_opp *opp __free(put_opp); unsigned long opp_freq, opp_power; /* Find the right frequency and related OPP */ @@ -1485,7 +1435,6 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) return -EINVAL; opp_power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); if (!opp_power) return -EINVAL; @@ -1516,8 +1465,8 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, unsigned long *kHz) { - struct dev_pm_opp *opp; - struct device_node *np; + struct dev_pm_opp *opp __free(put_opp) = NULL; + struct device_node *np __free(device_node); unsigned long mV, Hz; u32 cap; u64 tmp; @@ -1528,7 +1477,6 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, return -EINVAL; ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); - of_node_put(np); if (ret) return -EINVAL; @@ -1538,7 +1486,6 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, return -EINVAL; mV = dev_pm_opp_get_voltage(opp) / 1000; - dev_pm_opp_put(opp); if (!mV) return -EINVAL; @@ -1555,20 +1502,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power); static bool _of_has_opp_microwatt_property(struct device *dev) { - unsigned long power, freq = 0; - struct dev_pm_opp *opp; + struct dev_pm_opp *opp __free(put_opp); + unsigned long freq = 0; /* Check if at least one OPP has needed property */ opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) return false; - power = dev_pm_opp_get_power(opp); - dev_pm_opp_put(opp); - if (!power) - return false; - - return true; + return !!dev_pm_opp_get_power(opp); } /** @@ -1584,8 +1526,8 @@ static bool _of_has_opp_microwatt_property(struct device *dev) */ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) { + struct device_node *np __free(device_node) = NULL; struct em_data_callback em_cb; - struct device_node *np; int ret, nr_opp; u32 cap; @@ -1620,7 +1562,6 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) * user about the inconsistent configuration. */ ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); - of_node_put(np); if (ret || !cap) { dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n"); ret = -EINVAL; diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 5c7c81190e41..9eba63e01a9e 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -251,7 +251,6 @@ struct opp_table { /* Routines internal to opp core */ bool _opp_remove_all_static(struct opp_table *opp_table); -void _get_opp_table_kref(struct opp_table *opp_table); int _get_opp_count(struct opp_table *opp_table); struct opp_table *_find_opp_table(struct device *dev); struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index f01e4ef6619d..e9a9df1431af 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -1483,6 +1483,8 @@ static int ucsi_ccg_probe(struct i2c_client *client) i2c_set_clientdata(client, uc); + device_disable_async_suspend(uc->dev); + pm_runtime_set_active(uc->dev); pm_runtime_enable(uc->dev); pm_runtime_use_autosuspend(uc->dev);
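The final ucsi_ccg hunk opts the device out of asynchronous suspend/resume, so the PM core handles it strictly in list order relative to the devices it depends on. A rough sketch of the same call placed in a generic probe path; demo_probe() is hypothetical and only mirrors the hunk above:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;

	/*
	 * Keep this device on the synchronous suspend/resume path so it
	 * is never processed concurrently with devices it depends on.
	 */
	device_disable_async_suspend(dev);

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);

	return 0;
}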