From 6e103c0cfeb9ab8d40822a015da9769595096411 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 17 Aug 2016 19:14:20 +0200 Subject: arm/perf: Use multi instance instead of custom list Signed-off-by: Sebastian Andrzej Siewior Cc: Peter Zijlstra Cc: Mark Rutland Cc: Will Deacon Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160817171420.sdwk2qivxunzryz4@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/perf/arm_pmu.c | 44 ++++++++++++++++++-------------------------- 1 file changed, 18 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index c494613c1909..b2f742f84111 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -688,28 +688,20 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } -static DEFINE_SPINLOCK(arm_pmu_lock); -static LIST_HEAD(arm_pmu_list); - /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading * junk values out of them. */ -static int arm_perf_starting_cpu(unsigned int cpu) +static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) { - struct arm_pmu *pmu; - - spin_lock(&arm_pmu_lock); - list_for_each_entry(pmu, &arm_pmu_list, entry) { + struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); - if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) - continue; - if (pmu->reset) - pmu->reset(pmu); - } - spin_unlock(&arm_pmu_lock); + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + return 0; + if (pmu->reset) + pmu->reset(pmu); return 0; } @@ -821,9 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (!cpu_hw_events) return -ENOMEM; - spin_lock(&arm_pmu_lock); - list_add_tail(&cpu_pmu->entry, &arm_pmu_list); - spin_unlock(&arm_pmu_lock); + err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); + if (err) + goto out_free; err = cpu_pm_pmu_register(cpu_pmu); if (err) @@ -859,9 +852,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) return 0; out_unregister: - spin_lock(&arm_pmu_lock); - list_del(&cpu_pmu->entry); - spin_unlock(&arm_pmu_lock); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); +out_free: free_percpu(cpu_hw_events); return err; } @@ -869,9 +862,8 @@ out_unregister: static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) { cpu_pm_pmu_unregister(cpu_pmu); - spin_lock(&arm_pmu_lock); - list_del(&cpu_pmu->entry); - spin_unlock(&arm_pmu_lock); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); free_percpu(cpu_pmu->hw_events); } @@ -1068,9 +1060,9 @@ static int arm_pmu_hp_init(void) { int ret; - ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, - "AP_PERF_ARM_STARTING", - arm_perf_starting_cpu, NULL); + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING, + "AP_PERF_ARM_STARTING", + arm_perf_starting_cpu, NULL); if (ret) pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", ret); -- cgit v1.2.3 From b230f0db913136f465a951806f2978b179df95d5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 12 Aug 2016 19:49:41 +0200 Subject: bus/arm-cci: Use cpu-hp's multi instance support instead custom list Signed-off-by: Sebastian Andrzej Siewior Cc: Mark Rutland Cc: Suzuki K Poulose Cc: Peter Zijlstra Cc: Will Deacon Cc: rt@linutronix.de Cc: Olof Johansson Link: 
http://lkml.kernel.org/r/1471024183-12666-5-git-send-email-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/bus/arm-cci.c | 45 ++++++++++++++++++--------------------------- 1 file changed, 18 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 5755907f836f..4c44ba2d3412 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -144,15 +144,12 @@ struct cci_pmu { int num_cntrs; atomic_t active_events; struct mutex reserve_mutex; - struct list_head entry; + struct hlist_node node; cpumask_t cpus; }; #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) -static DEFINE_MUTEX(cci_pmu_mutex); -static LIST_HEAD(cci_pmu_list); - enum cci_models { #ifdef CONFIG_ARM_CCI400_PMU CCI400_R0, @@ -1506,25 +1503,21 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) return perf_pmu_register(&cci_pmu->pmu, name, -1); } -static int cci_pmu_offline_cpu(unsigned int cpu) +static int cci_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) { - struct cci_pmu *cci_pmu; + struct cci_pmu *cci_pmu = hlist_entry_safe(node, struct cci_pmu, node); unsigned int target; - mutex_lock(&cci_pmu_mutex); - list_for_each_entry(cci_pmu, &cci_pmu_list, entry) { - if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) - continue; - target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) - continue; - /* - * TODO: migrate context once core races on event->ctx have - * been fixed. - */ - cpumask_set_cpu(target, &cci_pmu->cpus); - } - mutex_unlock(&cci_pmu_mutex); + if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + /* + * TODO: migrate context once core races on event->ctx have + * been fixed. 
+ */ + cpumask_set_cpu(target, &cci_pmu->cpus); return 0; } @@ -1768,10 +1761,8 @@ static int cci_pmu_probe(struct platform_device *pdev) if (ret) return ret; - mutex_lock(&cci_pmu_mutex); - list_add(&cci_pmu->entry, &cci_pmu_list); - mutex_unlock(&cci_pmu_mutex); - + cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, + &cci_pmu->node); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; } @@ -1804,9 +1795,9 @@ static int __init cci_platform_init(void) { int ret; - ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, - "AP_PERF_ARM_CCI_ONLINE", NULL, - cci_pmu_offline_cpu); + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCI_ONLINE, + "AP_PERF_ARM_CCI_ONLINE", NULL, + cci_pmu_offline_cpu); if (ret) return ret; -- cgit v1.2.3 From 8df038725ad5351a9730759e0a24a5c5d96be661 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 12 Aug 2016 19:49:42 +0200 Subject: bus/arm-ccn: Use cpu-hp's multi instance support instead custom list Signed-off-by: Sebastian Andrzej Siewior Cc: Mark Rutland Cc: Pawel Moll Cc: Arnd Bergmann Cc: Suzuki K Poulose Cc: Peter Zijlstra Cc: Will Deacon Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/1471024183-12666-6-git-send-email-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/bus/arm-ccn.c | 54 ++++++++++++++++++++------------------------------- 1 file changed, 21 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 97a9185af433..e0ad47534126 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -167,7 +167,7 @@ struct arm_ccn_dt { struct hrtimer hrtimer; cpumask_t cpu; - struct list_head entry; + struct hlist_node node; struct pmu pmu; }; @@ -189,9 +189,6 @@ struct arm_ccn { struct arm_ccn_dt dt; }; -static DEFINE_MUTEX(arm_ccn_mutex); -static LIST_HEAD(arm_ccn_list); - static int arm_ccn_node_to_xp(int node) { return node / CCN_NUM_XP_PORTS; @@ -1173,30 +1170,24 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) } -static int arm_ccn_pmu_offline_cpu(unsigned int cpu) +static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) { - struct arm_ccn_dt *dt; + struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node); + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); unsigned int target; - mutex_lock(&arm_ccn_mutex); - list_for_each_entry(dt, &arm_ccn_list, entry) { - struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); - - if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) - continue; - target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) - continue; - perf_pmu_migrate_context(&dt->pmu, cpu, target); - cpumask_set_cpu(target, &dt->cpu); - if (ccn->irq) - WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); - } - mutex_unlock(&arm_ccn_mutex); + if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&dt->pmu, cpu, target); + cpumask_set_cpu(target, &dt->cpu); + if (ccn->irq) + WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); return 0; } - static DEFINE_IDA(arm_ccn_pmu_ida); static int arm_ccn_pmu_init(struct arm_ccn *ccn) @@ -1278,9 +1269,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) if (err) goto error_pmu_register; - mutex_lock(&arm_ccn_mutex); - list_add(&ccn->dt.entry, &arm_ccn_list); - mutex_unlock(&arm_ccn_mutex); + 
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); return 0; error_pmu_register: @@ -1296,10 +1286,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) { int i; - mutex_lock(&arm_ccn_mutex); - list_del(&ccn->dt.entry); - mutex_unlock(&arm_ccn_mutex); - + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); if (ccn->irq) irq_set_affinity_hint(ccn->irq, NULL); for (i = 0; i < ccn->num_xps; i++) @@ -1527,9 +1515,9 @@ static int __init arm_ccn_init(void) { int i, ret; - ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, - "AP_PERF_ARM_CCN_ONLINE", NULL, - arm_ccn_pmu_offline_cpu); + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE, + "AP_PERF_ARM_CCN_ONLINE", NULL, + arm_ccn_pmu_offline_cpu); if (ret) return ret; @@ -1541,7 +1529,7 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); } -- cgit v1.2.3 From 8017c279196ab29174bafc104ac4ebbd42c7ca7f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 12 Aug 2016 19:49:43 +0200 Subject: net/virtio-net: Convert to hotplug state machine Install the callbacks via the state machine. The driver supports multiple instances and therefore the new cpuhp_state_add_instance_nocalls() infrastrucure is used. The driver currently uses get_online_cpus() to avoid missing a CPU hotplug event while invoking virtnet_set_affinity(). This could be avoided by using cpuhp_state_add_instance() variant which holds the hotplug lock and invokes callback during registration. This is more or less a 1:1 conversion of the current code. Signed-off-by: Sebastian Andrzej Siewior Cc: Mark Rutland Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: netdev@vger.kernel.org Cc: Will Deacon Cc: virtualization@lists.linux-foundation.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/1471024183-12666-7-git-send-email-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/net/virtio_net.c | 110 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 86 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1b5f531eeb25..fad84f3f4109 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -138,8 +138,9 @@ struct virtnet_info { /* Does the affinity hint is set for virtqueues? 
*/ bool affinity_hint_set; - /* CPU hot plug notifier */ - struct notifier_block nb; + /* CPU hotplug instances for online & dead */ + struct hlist_node node; + struct hlist_node node_dead; /* Control VQ buffers: protected by the rtnl lock */ struct virtio_net_ctrl_hdr ctrl_hdr; @@ -1237,25 +1238,53 @@ static void virtnet_set_affinity(struct virtnet_info *vi) vi->affinity_hint_set = true; } -static int virtnet_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) { - struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); + struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, + node); + virtnet_set_affinity(vi); + return 0; +} - switch(action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - case CPU_DEAD: - virtnet_set_affinity(vi); - break; - case CPU_DOWN_PREPARE: - virtnet_clean_affinity(vi, (long)hcpu); - break; - default: - break; - } +static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) +{ + struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, + node_dead); + virtnet_set_affinity(vi); + return 0; +} - return NOTIFY_OK; +static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) +{ + struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, + node); + + virtnet_clean_affinity(vi, cpu); + return 0; +} + +static enum cpuhp_state virtionet_online; + +static int virtnet_cpu_notif_add(struct virtnet_info *vi) +{ + int ret; + + ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); + if (ret) + return ret; + ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, + &vi->node_dead); + if (!ret) + return ret; + cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); + return ret; +} + +static void virtnet_cpu_notif_remove(struct virtnet_info *vi) +{ + cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); + cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, + &vi->node_dead); } static void virtnet_get_ringparam(struct net_device *dev, @@ -1879,8 +1908,7 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); - vi->nb.notifier_call = &virtnet_cpu_callback; - err = register_hotcpu_notifier(&vi->nb); + err = virtnet_cpu_notif_add(vi); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); goto free_unregister_netdev; @@ -1934,7 +1962,7 @@ static void virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - unregister_hotcpu_notifier(&vi->nb); + virtnet_cpu_notif_remove(vi); /* Make sure no work handler is accessing the device. 
*/ flush_work(&vi->config_work); @@ -1953,7 +1981,7 @@ static int virtnet_freeze(struct virtio_device *vdev) struct virtnet_info *vi = vdev->priv; int i; - unregister_hotcpu_notifier(&vi->nb); + virtnet_cpu_notif_remove(vi); /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); @@ -1997,7 +2025,7 @@ static int virtnet_restore(struct virtio_device *vdev) virtnet_set_queues(vi, vi->curr_queue_pairs); rtnl_unlock(); - err = register_hotcpu_notifier(&vi->nb); + err = virtnet_cpu_notif_add(vi); if (err) return err; @@ -2039,7 +2067,41 @@ static struct virtio_driver virtio_net_driver = { #endif }; -module_virtio_driver(virtio_net_driver); +static __init int virtio_net_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE", + virtnet_cpu_online, + virtnet_cpu_down_prep); + if (ret < 0) + goto out; + virtionet_online = ret; + ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD", + NULL, virtnet_cpu_dead); + if (ret) + goto err_dead; + + ret = register_virtio_driver(&virtio_net_driver); + if (ret) + goto err_virtio; + return 0; +err_virtio: + cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); +err_dead: + cpuhp_remove_multi_state(virtionet_online); +out: + return ret; +} +module_init(virtio_net_driver_init); + +static __exit void virtio_net_driver_exit(void) +{ + cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); + cpuhp_remove_multi_state(virtionet_online); + unregister_virtio_driver(&virtio_net_driver); +} +module_exit(virtio_net_driver_exit); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); -- cgit v1.2.3 From 84a3f4db039e7c4bfe8ae9bebdebdf2a4e09bf86 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Aug 2016 14:57:23 +0200 Subject: net/mvneta: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Cc: Thomas Petazzoni Cc: Peter Zijlstra Cc: netdev@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160818125731.27256-9-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/net/ethernet/marvell/mvneta.c | 232 +++++++++++++++++++++------------- 1 file changed, 143 insertions(+), 89 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d41c28d00b57..b74548728fb5 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -382,7 +382,8 @@ struct mvneta_port { struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; struct net_device *dev; - struct notifier_block cpu_notifier; + struct hlist_node node_online; + struct hlist_node node_dead; int rxq_def; /* Protect the access to the percpu interrupt registers, * ensuring that the configuration remains coherent. @@ -574,6 +575,7 @@ struct mvneta_rx_queue { int next_desc_to_proc; }; +static enum cpuhp_state online_hpstate; /* The hardware supports eight (8) rx queues, but we are only allowing * the first one to be used. Therefore, let's just allocate one queue. 
*/ @@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) } }; -static int mvneta_percpu_notifier(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) { - struct mvneta_port *pp = container_of(nfb, struct mvneta_port, - cpu_notifier); - int cpu = (unsigned long)hcpu, other_cpu; + int other_cpu; + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_online); struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - spin_lock(&pp->lock); - /* Configuring the driver for a new CPU while the - * driver is stopping is racy, so just avoid it. - */ - if (pp->is_stopped) { - spin_unlock(&pp->lock); - break; - } - netif_tx_stop_all_queues(pp->dev); - /* We have to synchronise on tha napi of each CPU - * except the one just being waked up - */ - for_each_online_cpu(other_cpu) { - if (other_cpu != cpu) { - struct mvneta_pcpu_port *other_port = - per_cpu_ptr(pp->ports, other_cpu); + spin_lock(&pp->lock); + /* + * Configuring the driver for a new CPU while the driver is + * stopping is racy, so just avoid it. + */ + if (pp->is_stopped) { + spin_unlock(&pp->lock); + return 0; + } + netif_tx_stop_all_queues(pp->dev); - napi_synchronize(&other_port->napi); - } + /* + * We have to synchronise on tha napi of each CPU except the one + * just being woken up + */ + for_each_online_cpu(other_cpu) { + if (other_cpu != cpu) { + struct mvneta_pcpu_port *other_port = + per_cpu_ptr(pp->ports, other_cpu); + + napi_synchronize(&other_port->napi); } + } - /* Mask all ethernet port interrupts */ - on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - napi_enable(&port->napi); + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + napi_enable(&port->napi); + /* + * Enable per-CPU interrupts on the CPU that is + * brought up. + */ + mvneta_percpu_enable(pp); - /* Enable per-CPU interrupts on the CPU that is - * brought up. - */ - mvneta_percpu_enable(pp); + /* + * Enable per-CPU interrupt on the one CPU we care + * about. + */ + mvneta_percpu_elect(pp); - /* Enable per-CPU interrupt on the one CPU we care - * about. - */ - mvneta_percpu_elect(pp); - - /* Unmask all ethernet port interrupts */ - on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, - MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); - netif_tx_start_all_queues(pp->dev); - spin_unlock(&pp->lock); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - netif_tx_stop_all_queues(pp->dev); - /* Thanks to this lock we are sure that any pending - * cpu election is done - */ - spin_lock(&pp->lock); - /* Mask all ethernet port interrupts */ - on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); - spin_unlock(&pp->lock); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE); + netif_tx_start_all_queues(pp->dev); + spin_unlock(&pp->lock); + return 0; +} - napi_synchronize(&port->napi); - napi_disable(&port->napi); - /* Disable per-CPU interrupts on the CPU that is - * brought down. 
- */ - mvneta_percpu_disable(pp); +static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) +{ + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_online); + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - /* Check if a new CPU must be elected now this on is down */ - spin_lock(&pp->lock); - mvneta_percpu_elect(pp); - spin_unlock(&pp->lock); - /* Unmask all ethernet port interrupts */ - on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); - mvreg_write(pp, MVNETA_INTR_MISC_MASK, - MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); - netif_tx_start_all_queues(pp->dev); - break; - } + /* + * Thanks to this lock we are sure that any pending cpu election is + * done. + */ + spin_lock(&pp->lock); + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + spin_unlock(&pp->lock); - return NOTIFY_OK; + napi_synchronize(&port->napi); + napi_disable(&port->napi); + /* Disable per-CPU interrupts on the CPU that is brought down. */ + mvneta_percpu_disable(pp); + return 0; +} + +static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) +{ + struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, + node_dead); + + /* Check if a new CPU must be elected now this on is down */ + spin_lock(&pp->lock); + mvneta_percpu_elect(pp); + spin_unlock(&pp->lock); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE); + netif_tx_start_all_queues(pp->dev); + return 0; } static int mvneta_open(struct net_device *dev) @@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev) /* Register a CPU notifier to handle the case where our CPU * might be taken offline. 
*/ - register_cpu_notifier(&pp->cpu_notifier); + ret = cpuhp_state_add_instance_nocalls(online_hpstate, + &pp->node_online); + if (ret) + goto err_free_irq; + + ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); + if (ret) + goto err_free_online_hp; /* In default link is down */ netif_carrier_off(pp->dev); @@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev) ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); - goto err_free_irq; + goto err_free_dead_hp; } mvneta_start_dev(pp); return 0; +err_free_dead_hp: + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); +err_free_online_hp: + cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online); err_free_irq: - unregister_cpu_notifier(&pp->cpu_notifier); on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(pp->dev->irq, pp->ports); err_cleanup_txqs: @@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev) mvneta_stop_dev(pp); mvneta_mdio_remove(pp); - unregister_cpu_notifier(&pp->cpu_notifier); + + cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online); + cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD, + &pp->node_dead); on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); mvneta_cleanup_rxqs(pp); @@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev) err = of_property_read_string(dn, "managed", &managed); pp->use_inband_status = (err == 0 && strcmp(managed, "in-band-status") == 0); - pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; pp->rxq_def = rxq_def; @@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = { }, }; -module_platform_driver(mvneta_driver); +static int __init mvneta_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online", + mvneta_cpu_online, + mvneta_cpu_down_prepare); + if (ret < 0) + goto out; + online_hpstate = ret; + ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead", + NULL, mvneta_cpu_dead); + if (ret) + goto err_dead; + + ret = platform_driver_register(&mvneta_driver); + if (ret) + goto err; + return 0; + +err: + cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); +err_dead: + cpuhp_remove_multi_state(online_hpstate); +out: + return ret; +} +module_init(mvneta_driver_init); + +static void __exit mvneta_driver_exit(void) +{ + platform_driver_unregister(&mvneta_driver); + cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD); + cpuhp_remove_multi_state(online_hpstate); +} +module_exit(mvneta_driver_exit); MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); MODULE_AUTHOR("Rami Rosen , Thomas Petazzoni "); -- cgit v1.2.3 From 29c6d1bbd7a2cd88a197ea7cef171f616e198526 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Aug 2016 14:57:24 +0200 Subject: md/raid5: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
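As an aside, the multi-instance pattern this patch switches to looks roughly like the sketch below. The struct and callback names are placeholders (the patch itself uses struct r5conf and CPUHP_MD_RAID5_PREPARE); the key points are that each instance embeds a struct hlist_node, the callback recovers its instance via hlist_entry_safe(), and cpuhp_state_add_instance(), unlike the _nocalls variant, also runs the prepare callback for every CPU that is already online.

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/printk.h>

struct my_conf {
	struct hlist_node node;		/* links this instance into the cpuhp state */
	/* ... per-instance data ... */
};

static int my_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct my_conf *conf = hlist_entry_safe(node, struct my_conf, node);

	/* allocate the per-CPU scratch data of @conf for @cpu here */
	pr_debug("prepare cpu%u for instance %p\n", cpu, conf);
	return 0;
}

static int my_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct my_conf *conf = hlist_entry_safe(node, struct my_conf, node);

	/* free the per-CPU scratch data of @conf for the dead @cpu here */
	pr_debug("cleanup cpu%u for instance %p\n", cpu, conf);
	return 0;
}

int my_state_init(void)
{
	/* once, at module init: register the callbacks for the state */
	return cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, "md/raid5:prepare",
				       my_cpu_up_prepare, my_cpu_dead);
}

int my_conf_register(struct my_conf *conf)
{
	/* per instance: also invokes my_cpu_up_prepare() on all online CPUs */
	return cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
}
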
Signed-off-by: Sebastian Andrzej Siewior Cc: Peter Zijlstra Cc: Neil Brown Cc: linux-raid@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160818125731.27256-10-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/md/raid5.c | 84 +++++++++++++++++++----------------------------------- drivers/md/raid5.h | 4 +-- 2 files changed, 30 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8912407a4dd0..aae8064fd9e6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6330,22 +6330,20 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu return 0; } -static void raid5_free_percpu(struct r5conf *conf) +static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node) { - unsigned long cpu; + struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); + + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); + return 0; +} +static void raid5_free_percpu(struct r5conf *conf) +{ if (!conf->percpu) return; -#ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&conf->cpu_notify); -#endif - - get_online_cpus(); - for_each_possible_cpu(cpu) - free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); - put_online_cpus(); - + cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); free_percpu(conf->percpu); } @@ -6364,64 +6362,28 @@ static void free_conf(struct r5conf *conf) kfree(conf); } -#ifdef CONFIG_HOTPLUG_CPU -static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) { - struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); - long cpu = (long)hcpu; + struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - if (alloc_scratch_buffer(conf, percpu)) { - pr_err("%s: failed memory allocation for cpu%ld\n", - __func__, cpu); - return notifier_from_errno(-ENOMEM); - } - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); - break; - default: - break; + if (alloc_scratch_buffer(conf, percpu)) { + pr_err("%s: failed memory allocation for cpu%u\n", + __func__, cpu); + return -ENOMEM; } - return NOTIFY_OK; + return 0; } -#endif static int raid5_alloc_percpu(struct r5conf *conf) { - unsigned long cpu; int err = 0; conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; -#ifdef CONFIG_HOTPLUG_CPU - conf->cpu_notify.notifier_call = raid456_cpu_notify; - conf->cpu_notify.priority = 0; - err = register_cpu_notifier(&conf->cpu_notify); - if (err) - return err; -#endif - - get_online_cpus(); - for_each_present_cpu(cpu) { - err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); - if (err) { - pr_err("%s: failed memory allocation for cpu%ld\n", - __func__, cpu); - break; - } - } - put_online_cpus(); - + err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); if (!err) { conf->scribble_disks = max(conf->raid_disks, conf->previous_raid_disks); @@ -7953,10 +7915,21 @@ static struct md_personality raid4_personality = static int __init raid5_init(void) { + int ret; + raid5_wq = alloc_workqueue("raid5wq", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0); if (!raid5_wq) return -ENOMEM; + + ret = 
cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE, + "md/raid5:prepare", + raid456_cpu_up_prepare, + raid456_cpu_dead); + if (ret) { + destroy_workqueue(raid5_wq); + return ret; + } register_md_personality(&raid6_personality); register_md_personality(&raid5_personality); register_md_personality(&raid4_personality); @@ -7968,6 +7941,7 @@ static void raid5_exit(void) unregister_md_personality(&raid6_personality); unregister_md_personality(&raid5_personality); unregister_md_personality(&raid4_personality); + cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE); destroy_workqueue(raid5_wq); } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 517d4b68a1be..57ec49f0839e 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -512,9 +512,7 @@ struct r5conf { } __percpu *percpu; int scribble_disks; int scribble_sectors; -#ifdef CONFIG_HOTPLUG_CPU - struct notifier_block cpu_notify; -#endif + struct hlist_node node; /* * Free stripes pool -- cgit v1.2.3 From 529351fd3c50215a462e5e604d7ceaaf27a8a0e5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Aug 2016 14:57:25 +0200 Subject: cpuidle/pseries: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Cc: linux-pm@vger.kernel.org Cc: Peter Zijlstra Cc: Daniel Lezcano Cc: "Rafael J. Wysocki" Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160818125731.27256-11-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/cpuidle/cpuidle-pseries.c | 51 ++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c index 07135e009d8b..166ccd711ec9 100644 --- a/drivers/cpuidle/cpuidle-pseries.c +++ b/drivers/cpuidle/cpuidle-pseries.c @@ -171,40 +171,30 @@ static struct cpuidle_state shared_states[] = { .enter = &shared_cede_loop }, }; -static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n, - unsigned long action, void *hcpu) +static int pseries_cpuidle_cpu_online(unsigned int cpu) { - int hotcpu = (unsigned long)hcpu; - struct cpuidle_device *dev = - per_cpu(cpuidle_devices, hotcpu); + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - cpuidle_pause_and_lock(); - cpuidle_enable_device(dev); - cpuidle_resume_and_unlock(); - break; + cpuidle_pause_and_lock(); + cpuidle_enable_device(dev); + cpuidle_resume_and_unlock(); + } + return 0; +} - case CPU_DEAD: - case CPU_DEAD_FROZEN: - cpuidle_pause_and_lock(); - cpuidle_disable_device(dev); - cpuidle_resume_and_unlock(); - break; +static int pseries_cpuidle_cpu_dead(unsigned int cpu) +{ + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); - default: - return NOTIFY_DONE; - } + if (dev && cpuidle_get_driver()) { + cpuidle_pause_and_lock(); + cpuidle_disable_device(dev); + cpuidle_resume_and_unlock(); } - return NOTIFY_OK; + return 0; } -static struct notifier_block setup_hotplug_notifier = { - .notifier_call = pseries_cpuidle_add_cpu_notifier, -}; - /* * pseries_cpuidle_driver_init() */ @@ -273,7 +263,14 @@ static int __init pseries_processor_idle_init(void) return retval; } - register_cpu_notifier(&setup_hotplug_notifier); + retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "cpuidle/pseries:online", + pseries_cpuidle_cpu_online, NULL); + WARN_ON(retval < 0); + retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD, + "cpuidle/pseries:DEAD", 
NULL, + pseries_cpuidle_cpu_dead); + WARN_ON(retval < 0); printk(KERN_DEBUG "pseries_idle_driver registered\n"); return 0; } -- cgit v1.2.3 From 10fcca9d8704a04c6e86398f930fa28e0fb03ce4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 24 Aug 2016 11:12:59 +0200 Subject: cpuidle/powernv: Convert to hotplug state machine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Install the callbacks via the state machine. v1…v2: - Use only CPUHP_CPUIDLE_DEAD (requested by Daniel Lezcano) Signed-off-by: Sebastian Andrzej Siewior Cc: linux-pm@vger.kernel.org Cc: Peter Zijlstra Cc: Daniel Lezcano Cc: "Rafael J. Wysocki" Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160824091259.ozyslcopxvbfdqzy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/cpuidle/cpuidle-powernv.c | 51 ++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index f7ca891b5b59..7fe442ca38f4 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -119,40 +119,30 @@ static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = { .enter = snooze_loop }, }; -static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n, - unsigned long action, void *hcpu) +static int powernv_cpuidle_cpu_online(unsigned int cpu) { - int hotcpu = (unsigned long)hcpu; - struct cpuidle_device *dev = - per_cpu(cpuidle_devices, hotcpu); + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); if (dev && cpuidle_get_driver()) { - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - cpuidle_pause_and_lock(); - cpuidle_enable_device(dev); - cpuidle_resume_and_unlock(); - break; + cpuidle_pause_and_lock(); + cpuidle_enable_device(dev); + cpuidle_resume_and_unlock(); + } + return 0; +} - case CPU_DEAD: - case CPU_DEAD_FROZEN: - cpuidle_pause_and_lock(); - cpuidle_disable_device(dev); - cpuidle_resume_and_unlock(); - break; +static int powernv_cpuidle_cpu_dead(unsigned int cpu) +{ + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); - default: - return NOTIFY_DONE; - } + if (dev && cpuidle_get_driver()) { + cpuidle_pause_and_lock(); + cpuidle_disable_device(dev); + cpuidle_resume_and_unlock(); } - return NOTIFY_OK; + return 0; } -static struct notifier_block setup_hotplug_notifier = { - .notifier_call = powernv_cpuidle_add_cpu_notifier, -}; - /* * powernv_cpuidle_driver_init() */ @@ -355,7 +345,14 @@ static int __init powernv_processor_idle_init(void) return retval; } - register_cpu_notifier(&setup_hotplug_notifier); + retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "cpuidle/powernv:online", + powernv_cpuidle_cpu_online, NULL); + WARN_ON(retval < 0); + retval = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_DEAD, + "cpuidle/powernv:dead", NULL, + powernv_cpuidle_cpu_dead); + WARN_ON(retval < 0); printk(KERN_DEBUG "powernv_idle_driver registered\n"); return 0; } -- cgit v1.2.3 From dfc616d8b3df3013c579e023e67f29ada60bdd50 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 24 Aug 2016 11:14:44 +0200 Subject: cpuidle/coupled: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Cc: linux-pm@vger.kernel.org Cc: Peter Zijlstra Cc: Daniel Lezcano Cc: "Rafael J. 
Wysocki" Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160824091444.brdr5zpbxjvh6n3f@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/cpuidle/coupled.c | 75 ++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index d5657d50ac40..71e586d7df71 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -749,65 +749,52 @@ static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled) put_cpu(); } -/** - * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions - * @nb: notifier block - * @action: hotplug transition - * @hcpu: target cpu number - * - * Called when a cpu is brought on or offline using hotplug. Updates the - * coupled cpu set appropriately - */ -static int cpuidle_coupled_cpu_notify(struct notifier_block *nb, - unsigned long action, void *hcpu) +static int coupled_cpu_online(unsigned int cpu) { - int cpu = (unsigned long)hcpu; struct cpuidle_device *dev; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - case CPU_DOWN_PREPARE: - case CPU_ONLINE: - case CPU_DEAD: - case CPU_UP_CANCELED: - case CPU_DOWN_FAILED: - break; - default: - return NOTIFY_OK; - } - mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); - if (!dev || !dev->coupled) - goto out; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - case CPU_DOWN_PREPARE: - cpuidle_coupled_prevent_idle(dev->coupled); - break; - case CPU_ONLINE: - case CPU_DEAD: + if (dev && dev->coupled) { cpuidle_coupled_update_online_cpus(dev->coupled); - /* Fall through */ - case CPU_UP_CANCELED: - case CPU_DOWN_FAILED: cpuidle_coupled_allow_idle(dev->coupled); - break; } -out: mutex_unlock(&cpuidle_lock); - return NOTIFY_OK; + return 0; } -static struct notifier_block cpuidle_coupled_cpu_notifier = { - .notifier_call = cpuidle_coupled_cpu_notify, -}; +static int coupled_cpu_up_prepare(unsigned int cpu) +{ + struct cpuidle_device *dev; + + mutex_lock(&cpuidle_lock); + + dev = per_cpu(cpuidle_devices, cpu); + if (dev && dev->coupled) + cpuidle_coupled_prevent_idle(dev->coupled); + + mutex_unlock(&cpuidle_lock); + return 0; +} static int __init cpuidle_coupled_init(void) { - return register_cpu_notifier(&cpuidle_coupled_cpu_notifier); + int ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE, + "cpuidle/coupled:prepare", + coupled_cpu_up_prepare, + coupled_cpu_online); + if (ret) + return ret; + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "cpuidle/coupled:online", + coupled_cpu_online, + coupled_cpu_up_prepare); + if (ret < 0) + cpuhp_remove_state_nocalls(CPUHP_CPUIDLE_COUPLED_PREPARE); + return ret; } core_initcall(cpuidle_coupled_init); -- cgit v1.2.3 From e8483b578b229774382a95891439b2ebd9c92fc5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 18 Aug 2016 14:57:28 +0200 Subject: MIPS/BUS/CDMM: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Sebastian Andrzej Siewior Cc: James Hogan Cc: Peter Zijlstra Cc: Ralf Baechle Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160818125731.27256-14-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/bus/mips_cdmm.c | 70 +++++++++---------------------------------------- 1 file changed, 12 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index cad49bc38b3e..1b14256376d2 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -596,19 +596,20 @@ BUILD_PERDEV_HELPER(cpu_down) /* int mips_cdmm_cpu_down_helper(...) */ BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */ /** - * mips_cdmm_bus_down() - Tear down the CDMM bus. - * @data: Pointer to unsigned int CPU number. + * mips_cdmm_cpu_down_prep() - Callback for CPUHP DOWN_PREP: + * Tear down the CDMM bus. + * @cpu: unsigned int CPU number. * * This function is executed on the hotplugged CPU and calls the CDMM * driver cpu_down callback for all devices on that CPU. */ -static long mips_cdmm_bus_down(void *data) +static int mips_cdmm_cpu_down_prep(unsigned int cpu) { struct mips_cdmm_bus *bus; long ret; /* Inform all the devices on the bus */ - ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data, + ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu, mips_cdmm_cpu_down_helper); /* @@ -623,8 +624,8 @@ static long mips_cdmm_bus_down(void *data) } /** - * mips_cdmm_bus_up() - Bring up the CDMM bus. - * @data: Pointer to unsigned int CPU number. + * mips_cdmm_cpu_online() - Callback for CPUHP ONLINE: Bring up the CDMM bus. + * @cpu: unsigned int CPU number. * * This work_on_cpu callback function is executed on a given CPU to discover * CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all @@ -634,7 +635,7 @@ static long mips_cdmm_bus_down(void *data) * initialisation. When CPUs are brought online the function is * invoked directly on the hotplugged CPU. */ -static long mips_cdmm_bus_up(void *data) +static int mips_cdmm_cpu_online(unsigned int cpu) { struct mips_cdmm_bus *bus; long ret; @@ -651,50 +652,12 @@ static long mips_cdmm_bus_up(void *data) mips_cdmm_bus_discover(bus); else /* Inform all the devices on the bus */ - ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, data, + ret = bus_for_each_dev(&mips_cdmm_bustype, NULL, &cpu, mips_cdmm_cpu_up_helper); return ret; } -/** - * mips_cdmm_cpu_notify() - Take action when a CPU is going online or offline. - * @nb: CPU notifier block . - * @action: Event that has taken place (CPU_*). - * @data: CPU number. - * - * This notifier is used to keep the CDMM buses updated as CPUs are offlined and - * onlined. When CPUs go offline or come back online, so does their CDMM bus, so - * devices must be informed. Also when CPUs come online for the first time the - * devices on the CDMM bus need discovering. - * - * Returns: NOTIFY_OK if event was used. - * NOTIFY_DONE if we didn't care. - */ -static int mips_cdmm_cpu_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - unsigned int cpu = (unsigned int)data; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - mips_cdmm_bus_up(&cpu); - break; - case CPU_DOWN_PREPARE: - mips_cdmm_bus_down(&cpu); - break; - default: - return NOTIFY_DONE; - } - - return NOTIFY_OK; -} - -static struct notifier_block mips_cdmm_cpu_nb = { - .notifier_call = mips_cdmm_cpu_notify, -}; - /** * mips_cdmm_init() - Initialise CDMM bus. 
* @@ -703,7 +666,6 @@ static struct notifier_block mips_cdmm_cpu_nb = { */ static int __init mips_cdmm_init(void) { - unsigned int cpu; int ret; /* Register the bus */ @@ -712,19 +674,11 @@ static int __init mips_cdmm_init(void) return ret; /* We want to be notified about new CPUs */ - ret = register_cpu_notifier(&mips_cdmm_cpu_nb); - if (ret) { + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "bus/cdmm:online", + mips_cdmm_cpu_online, mips_cdmm_cpu_down_prep); + if (ret < 0) pr_warn("cdmm: Failed to register CPU notifier\n"); - goto out; - } - - /* Discover devices on CDMM of online CPUs */ - for_each_online_cpu(cpu) - work_on_cpu(cpu, mips_cdmm_bus_up, &cpu); - return 0; -out: - bus_unregister(&mips_cdmm_bustype); return ret; } subsys_initcall(mips_cdmm_init); -- cgit v1.2.3 From a4e0591ece7d88634a802c4076db8c0debbde805 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 6 Sep 2016 19:04:45 +0200 Subject: oprofile/timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Since the online target runs always on the target CPU we can drop smp_call_function_single(). The functions is invoked with interrupts off to keep the old calling convention. If the maintainer things that this function can be called with interrupts enabled then it can be removed :) Signed-off-by: Sebastian Andrzej Siewior Cc: Robert Richter Cc: Peter Zijlstra Cc: oprofile-list@lists.sf.net Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160906170457.32393-10-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/oprofile/timer_int.c | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index bdef916e5dda..2498a6cd7c24 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -74,37 +74,39 @@ static void oprofile_hrtimer_stop(void) put_online_cpus(); } -static int oprofile_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int oprofile_timer_online(unsigned int cpu) { - long cpu = (long) hcpu; - - switch (action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - smp_call_function_single(cpu, __oprofile_hrtimer_start, - NULL, 1); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - __oprofile_hrtimer_stop(cpu); - break; - } - return NOTIFY_OK; + local_irq_disable(); + __oprofile_hrtimer_start(NULL); + local_irq_enable(); + return 0; } -static struct notifier_block __refdata oprofile_cpu_notifier = { - .notifier_call = oprofile_cpu_notify, -}; +static int oprofile_timer_prep_down(unsigned int cpu) +{ + __oprofile_hrtimer_stop(cpu); + return 0; +} + +static enum cpuhp_state hp_online; static int oprofile_hrtimer_setup(void) { - return register_hotcpu_notifier(&oprofile_cpu_notifier); + int ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "oprofile/timer:online", + oprofile_timer_online, + oprofile_timer_prep_down); + if (ret < 0) + return ret; + hp_online = ret; + return 0; } static void oprofile_hrtimer_shutdown(void) { - unregister_hotcpu_notifier(&oprofile_cpu_notifier); + cpuhp_remove_state_nocalls(hp_online); } int oprofile_timer_init(struct oprofile_operations *ops) -- cgit v1.2.3 From 8904f5a5afc4dd74e8fe2ab3eeb98018ef02f3e6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 6 Sep 2016 19:04:46 +0200 Subject: virtio scsi: Convert to hotplug state machine Install the callbacks 
via the state machine. It uses the multi instance infrastructure of the hotplug code to handle each interface. virtscsi_set_affinity() is removed from virtscsi_init() because virtscsi_cpu_notif_add() (the function which registers the instance) is invoked right after it and the cpuhp_state_add_instance() functions invokes the startup callback on all online CPUs. The same thing can not be applied virtscsi_cpu_notif_remove() because virtscsi_remove_vqs() invokes virtscsi_set_affinity() with affinity = false as argument but the old CPU_DEAD state invoked the function with affinity = true (which does not match the DEAD callback). Signed-off-by: Sebastian Andrzej Siewior Cc: "James E.J. Bottomley" Cc: linux-scsi@vger.kernel.org Cc: "Martin K. Petersen" Cc: "Michael S. Tsirkin" Cc: Peter Zijlstra Cc: virtualization@lists.linux-foundation.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160906170457.32393-11-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/scsi/virtio_scsi.c | 76 ++++++++++++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 7dbbb29d24c6..deefab3a94d0 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -107,8 +107,8 @@ struct virtio_scsi { /* If the affinity hint is set for virtqueues */ bool affinity_hint_set; - /* CPU hotplug notifier */ - struct notifier_block nb; + struct hlist_node node; + struct hlist_node node_dead; /* Protected by event_vq lock */ bool stop_events; @@ -118,6 +118,7 @@ struct virtio_scsi { struct virtio_scsi_vq req_vqs[]; }; +static enum cpuhp_state virtioscsi_online; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; @@ -852,21 +853,33 @@ static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) put_online_cpus(); } -static int virtscsi_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node) { - struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); - switch(action) { - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - __virtscsi_set_affinity(vscsi, true); - break; - default: - break; - } - return NOTIFY_OK; + struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi, + node); + __virtscsi_set_affinity(vscsi, true); + return 0; +} + +static int virtscsi_cpu_notif_add(struct virtio_scsi *vi) +{ + int ret; + + ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead); + if (ret) + cpuhp_state_remove_instance(virtioscsi_online, &vi->node); + return ret; +} + +static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi) +{ + cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node); + cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD, + &vi->node_dead); } static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, @@ -929,8 +942,6 @@ static int virtscsi_init(struct virtio_device *vdev, virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], vqs[i]); - virtscsi_set_affinity(vscsi, true); - virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); @@ -987,12 +998,9 @@ static int virtscsi_probe(struct virtio_device *vdev) if (err) goto virtscsi_init_failed; - vscsi->nb.notifier_call = 
&virtscsi_cpu_callback; - err = register_hotcpu_notifier(&vscsi->nb); - if (err) { - pr_err("registering cpu notifier failed\n"); + err = virtscsi_cpu_notif_add(vscsi); + if (err) goto scsi_add_host_failed; - } cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); @@ -1049,7 +1057,7 @@ static void virtscsi_remove(struct virtio_device *vdev) scsi_remove_host(shost); - unregister_hotcpu_notifier(&vscsi->nb); + virtscsi_cpu_notif_remove(vscsi); virtscsi_remove_vqs(vdev); scsi_host_put(shost); @@ -1061,7 +1069,7 @@ static int virtscsi_freeze(struct virtio_device *vdev) struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); - unregister_hotcpu_notifier(&vscsi->nb); + virtscsi_cpu_notif_remove(vscsi); virtscsi_remove_vqs(vdev); return 0; } @@ -1076,12 +1084,11 @@ static int virtscsi_restore(struct virtio_device *vdev) if (err) return err; - err = register_hotcpu_notifier(&vscsi->nb); + err = virtscsi_cpu_notif_add(vscsi); if (err) { vdev->config->del_vqs(vdev); return err; } - virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) @@ -1136,6 +1143,16 @@ static int __init init(void) pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "scsi/virtio:online", + virtscsi_cpu_online, NULL); + if (ret < 0) + goto error; + virtioscsi_online = ret; + ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead", + NULL, virtscsi_cpu_online); + if (ret) + goto error; ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; @@ -1151,12 +1168,17 @@ error: kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; } + if (virtioscsi_online) + cpuhp_remove_multi_state(virtioscsi_online); + cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); return ret; } static void __exit fini(void) { unregister_virtio_driver(&virtio_scsi_driver); + cpuhp_remove_multi_state(virtioscsi_online); + cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } -- cgit v1.2.3 From 64f3bf2f85c5690228200d6b94eb6847049af70d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 6 Sep 2016 19:04:47 +0200 Subject: ACPI/processor: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Acked-by: "Rafael J. 
Wysocki" Cc: Peter Zijlstra Cc: linux-acpi@vger.kernel.org Cc: rt@linutronix.de Cc: Len Brown Link: http://lkml.kernel.org/r/20160906170457.32393-12-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/acpi/processor_driver.c | 91 +++++++++++++++++++------------------ drivers/acpi/processor_throttling.c | 4 +- 2 files changed, 49 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 0553aeebb228..13e5ac415abf 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -110,55 +110,46 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data) static int __acpi_processor_start(struct acpi_device *device); -static int acpi_cpu_soft_notify(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int acpi_soft_cpu_online(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; struct acpi_processor *pr = per_cpu(processors, cpu); struct acpi_device *device; - action &= ~CPU_TASKS_FROZEN; - - switch (action) { - case CPU_ONLINE: - case CPU_DEAD: - break; - default: - return NOTIFY_DONE; - } if (!pr || acpi_bus_get_device(pr->handle, &device)) - return NOTIFY_DONE; - - if (action == CPU_ONLINE) { - /* - * CPU got physically hotplugged and onlined for the first time: - * Initialize missing things. - */ - if (pr->flags.need_hotplug_init) { - int ret; - - pr_info("Will online and init hotplugged CPU: %d\n", - pr->id); - pr->flags.need_hotplug_init = 0; - ret = __acpi_processor_start(device); - WARN(ret, "Failed to start CPU: %d\n", pr->id); - } else { - /* Normal CPU soft online event. */ - acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_hotplug(pr); - acpi_processor_reevaluate_tstate(pr, action); - acpi_processor_tstate_has_changed(pr); - } - } else if (action == CPU_DEAD) { - /* Invalidate flag.throttling after the CPU is offline. */ - acpi_processor_reevaluate_tstate(pr, action); + return 0; + /* + * CPU got physically hotplugged and onlined for the first time: + * Initialize missing things. + */ + if (pr->flags.need_hotplug_init) { + int ret; + + pr_info("Will online and init hotplugged CPU: %d\n", + pr->id); + pr->flags.need_hotplug_init = 0; + ret = __acpi_processor_start(device); + WARN(ret, "Failed to start CPU: %d\n", pr->id); + } else { + /* Normal CPU soft online event. 
*/ + acpi_processor_ppc_has_changed(pr, 0); + acpi_processor_hotplug(pr); + acpi_processor_reevaluate_tstate(pr, false); + acpi_processor_tstate_has_changed(pr); } - return NOTIFY_OK; + return 0; } -static struct notifier_block acpi_cpu_notifier = { - .notifier_call = acpi_cpu_soft_notify, -}; +static int acpi_soft_cpu_dead(unsigned int cpu) +{ + struct acpi_processor *pr = per_cpu(processors, cpu); + struct acpi_device *device; + + if (!pr || acpi_bus_get_device(pr->handle, &device)) + return 0; + + acpi_processor_reevaluate_tstate(pr, true); + return 0; +} #ifdef CONFIG_ACPI_CPU_FREQ_PSS static int acpi_pss_perf_init(struct acpi_processor *pr, @@ -303,7 +294,7 @@ static int acpi_processor_stop(struct device *dev) * This is needed for the powernow-k8 driver, that works even without * ACPI, but needs symbols from this driver */ - +static enum cpuhp_state hp_online; static int __init acpi_processor_driver_init(void) { int result = 0; @@ -315,11 +306,22 @@ static int __init acpi_processor_driver_init(void) if (result < 0) return result; - register_hotcpu_notifier(&acpi_cpu_notifier); + result = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "acpi/cpu-drv:online", + acpi_soft_cpu_online, NULL); + if (result < 0) + goto err; + hp_online = result; + cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead", + NULL, acpi_soft_cpu_dead); + acpi_thermal_cpufreq_init(); acpi_processor_ppc_init(); acpi_processor_throttling_init(); return 0; +err: + driver_unregister(&acpi_processor_driver); + return result; } static void __exit acpi_processor_driver_exit(void) @@ -329,7 +331,8 @@ static void __exit acpi_processor_driver_exit(void) acpi_processor_ppc_exit(); acpi_thermal_cpufreq_exit(); - unregister_hotcpu_notifier(&acpi_cpu_notifier); + cpuhp_remove_state_nocalls(hp_online); + cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD); driver_unregister(&acpi_processor_driver); } diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index c72e64893d03..d51ca1c05619 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -375,11 +375,11 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) * 3. TSD domain */ void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, - unsigned long action) + bool is_dead) { int result = 0; - if (action == CPU_DEAD) { + if (is_dead) { /* When one CPU is offline, the T-state throttling * will be invalidated. */ -- cgit v1.2.3 From 27622b061eb4bb4d16b5d61219ac10a792010321 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 6 Sep 2016 19:04:48 +0200 Subject: cpufreq: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Acked-by: "Rafael J. 
Wysocki" Cc: linux-pm@vger.kernel.org Cc: Peter Zijlstra Cc: Viresh Kumar --- drivers/cpufreq/cpufreq.c | 38 ++++++++++++-------------------------- 1 file changed, 12 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3dd4884c6f9e..e0bc632a259e 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1358,7 +1358,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) return add_cpu_dev_symlink(policy, cpu); } -static void cpufreq_offline(unsigned int cpu) +static int cpufreq_offline(unsigned int cpu) { struct cpufreq_policy *policy; int ret; @@ -1368,7 +1368,7 @@ static void cpufreq_offline(unsigned int cpu) policy = cpufreq_cpu_get_raw(cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); - return; + return 0; } down_write(&policy->rwsem); @@ -1417,6 +1417,7 @@ static void cpufreq_offline(unsigned int cpu) unlock: up_write(&policy->rwsem); + return 0; } /** @@ -2332,28 +2333,6 @@ unlock: } EXPORT_SYMBOL(cpufreq_update_policy); -static int cpufreq_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - cpufreq_online(cpu); - break; - - case CPU_DOWN_PREPARE: - cpufreq_offline(cpu); - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __refdata cpufreq_cpu_notifier = { - .notifier_call = cpufreq_cpu_callback, -}; - /********************************************************************* * BOOST * *********************************************************************/ @@ -2455,6 +2434,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); /********************************************************************* * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ +static enum cpuhp_state hp_online; /** * cpufreq_register_driver - register a CPU Frequency driver @@ -2517,7 +2497,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) goto err_if_unreg; } - register_hotcpu_notifier(&cpufreq_cpu_notifier); + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", + cpufreq_online, + cpufreq_offline); + if (ret < 0) + goto err_if_unreg; + hp_online = ret; + pr_debug("driver %s up and running\n", driver_data->name); goto out; @@ -2556,7 +2542,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) get_online_cpus(); subsys_interface_unregister(&cpufreq_interface); remove_boost_sysfs_file(); - unregister_hotcpu_notifier(&cpufreq_cpu_notifier); + cpuhp_remove_state_nocalls(hp_online); write_lock_irqsave(&cpufreq_driver_lock, flags); -- cgit v1.2.3 From 5372e054a1928fe704cf0a5e2e139645a777b50a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 20 Sep 2016 16:56:28 +0200 Subject: cpufreq: Fix up conversion to hotplug state machine The function cpufreq_register_driver() returns zero on success and since commit 27622b061eb4 ("cpufreq: Convert to hotplug state machine") erroneously a positive number. Due to the "if (x) assume_error" construct all callers assumed an error and as a consequence the cpu freq kworker crashes with a NULL pointer dereference. Reset the return value back to zero in the success case. 
Fixes: 27622b061eb4 ("cpufreq: Convert to hotplug state machine") Reported-by: Borislav Petkov Reported-and-tested-by: Ingo Molnar Signed-off-by: Sebastian Andrzej Siewior Cc: peterz@infradead.org Cc: rjw@rjwysocki.net Link: http://lkml.kernel.org/r/20160920145628.lp2bmq72ip3oiash@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/cpufreq/cpufreq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e0bc632a259e..8b44de4d7438 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2503,6 +2503,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (ret < 0) goto err_if_unreg; hp_online = ret; + ret = 0; pr_debug("driver %s up and running\n", driver_data->name); goto out; -- cgit v1.2.3
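
Taken together, the multi-instance conversions above (arm_pmu, arm-cci, arm-ccn, virtio-net, virtio-scsi, mvneta, raid5) converge on one lifecycle, summarised in the sketch below with placeholder names. CPUHP_MY_DRIVER_DEAD stands for a driver-specific entry that would have to be added to enum cpuhp_state, just as the patches add CPUHP_VIRT_NET_DEAD, CPUHP_NET_MVNETA_DEAD and friends; only the cpuhp_*() calls are the real API used throughout the series.

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/printk.h>

static enum cpuhp_state my_online;	/* dynamically allocated AP online state */

struct my_dev {
	struct hlist_node node;		/* instance on the online state */
	struct hlist_node node_dead;	/* instance on the DEAD state */
};

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *d = hlist_entry_safe(node, struct my_dev, node);

	/* re-establish the per-CPU state of @d on the incoming @cpu */
	pr_debug("cpu%u online for %p\n", cpu, d);
	return 0;
}

static int my_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *d = hlist_entry_safe(node, struct my_dev, node_dead);

	/* clean up the per-CPU state of @d after @cpu went away */
	pr_debug("cpu%u dead for %p\n", cpu, d);
	return 0;
}

static int __init my_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my_driver:online",
				      my_cpu_online, NULL);
	if (ret < 0)
		return ret;
	my_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_MY_DRIVER_DEAD, "my_driver:dead",
				      NULL, my_cpu_dead);
	if (ret)
		cpuhp_remove_multi_state(my_online);
	return ret;
}

/* per device, typically from probe() or open() */
int my_dev_register(struct my_dev *d)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(my_online, &d->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_MY_DRIVER_DEAD, &d->node_dead);
	if (ret)
		cpuhp_state_remove_instance_nocalls(my_online, &d->node);
	return ret;
}

void my_dev_unregister(struct my_dev *d)
{
	cpuhp_state_remove_instance_nocalls(my_online, &d->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_MY_DRIVER_DEAD, &d->node_dead);
}

static void __exit my_module_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_MY_DRIVER_DEAD);
	cpuhp_remove_multi_state(my_online);
}
module_init(my_module_init);
module_exit(my_module_exit);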