author     Len Brown <len.brown@intel.com>   2009-01-09 03:39:43 -0500
committer  Len Brown <len.brown@intel.com>   2009-01-09 03:39:43 -0500
commit  b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree    004f3c82faab760f304ce031d6d2f572e7746a50 /drivers/acpi
parent  3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent  2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)
Merge branch 'linus' into release
Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/acpi_memhotplug.c       |  2
-rw-r--r--  drivers/acpi/acpica/acmacros.h       |  4
-rw-r--r--  drivers/acpi/acpica/exprep.c         |  2
-rw-r--r--  drivers/acpi/acpica/exresolv.c       |  2
-rw-r--r--  drivers/acpi/acpica/exstore.c        |  2
-rw-r--r--  drivers/acpi/acpica/rscreate.c       |  2
-rw-r--r--  drivers/acpi/acpica/utobject.c       |  4
-rw-r--r--  drivers/acpi/pci_irq.c               | 56
-rw-r--r--  drivers/acpi/pci_root.c              | 20
-rw-r--r--  drivers/acpi/processor_core.c        | 14
-rw-r--r--  drivers/acpi/processor_idle.c        |  7
-rw-r--r--  drivers/acpi/processor_perflib.c     | 28
-rw-r--r--  drivers/acpi/processor_throttling.c  | 80
13 files changed, 167 insertions(+), 56 deletions(-)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 63a17b55b39b..7a0f4aa4fa1e 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -20,7 +20,7 @@
*
*
* ACPI based HotPlug driver that supports Memory Hotplug
- * This driver fields notifications from firmare for memory add
+ * This driver fields notifications from firmware for memory add
* and remove operations and alerts the VM of the affected memory
* ranges.
*/
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c57286a3aceb..9c127e8e2d6d 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -283,8 +283,8 @@
#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
/*
- * An struct acpi_namespace_node can appear in some contexts
- * where a pointer to an union acpi_operand_object can also
+ * A struct acpi_namespace_node can appear in some contexts
+ * where a pointer to a union acpi_operand_object can also
* appear. This macro is used to distinguish them.
*
* The "Descriptor" field is the first field in both structures.
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 33c66eb3ae39..a226f74d4a5c 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -405,7 +405,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
*
* RETURN: Status
*
- * DESCRIPTION: Construct an union acpi_operand_object of type def_field and
+ * DESCRIPTION: Construct a union acpi_operand_object of type def_field and
* connect it to the parent Node.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 3673d2b2c4ac..f6105a6d6126 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -147,7 +147,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
stack_desc = *stack_ptr;
- /* This is an union acpi_operand_object */
+ /* This is a union acpi_operand_object */
switch (ACPI_GET_OBJECT_TYPE(stack_desc)) {
case ACPI_TYPE_LOCAL_REFERENCE:
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 7c6d7e53e41e..e35e9b4f6a4e 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -275,7 +275,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
*
* PARAMETERS: *source_desc - Value to be stored
* *dest_desc - Where to store it. Must be an NS node
- * or an union acpi_operand_object of type
+ * or a union acpi_operand_object of type
* Reference;
* walk_state - Current walk state
*
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 9eb3a2a68408..61566b1a0616 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -125,7 +125,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
*
* FUNCTION: acpi_rs_create_pci_routing_table
*
- * PARAMETERS: package_object - Pointer to an union acpi_operand_object
+ * PARAMETERS: package_object - Pointer to a union acpi_operand_object
* package
* output_buffer - Pointer to the user's buffer
*
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 4279ed3a8782..fd5ea7543e5b 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -298,7 +298,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*
* RETURN: TRUE if object is valid, FALSE otherwise
*
- * DESCRIPTION: Validate a pointer to be an union acpi_operand_object
+ * DESCRIPTION: Validate a pointer to be a union acpi_operand_object
*
******************************************************************************/
@@ -390,7 +390,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
- /* Object must be an union acpi_operand_object */
+ /* Object must be a union acpi_operand_object */
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 11acaee14d66..bf79d83bdfbb 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -384,6 +384,27 @@ acpi_pci_free_irq(struct acpi_prt_entry *entry,
return irq;
}
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+ struct pci_bus *bus_it;
+
+ for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
+ if (!bus_it->self)
+ return 0;
+
+ printk(KERN_INFO "vendor=%04x device=%04x\n", bus_it->self->vendor,
+ bus_it->self->device);
+
+ if (bus_it->self->irq_reroute_variant)
+ return bus_it->self->irq_reroute_variant;
+ }
+ return 0;
+}
+#endif /* CONFIG_X86_IO_APIC */
+
/*
* acpi_pci_irq_lookup
* success: return IRQ >= 0
@@ -413,6 +434,41 @@ acpi_pci_irq_lookup(struct pci_bus *bus,
}
ret = func(entry, triggering, polarity, link);
+
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Some chipsets (e.g. intel 6700PXH) generate a legacy INTx when the
+ * IRQ entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel
+ * does during interrupt handling). When this INTx generation cannot be
+ * disabled, we reroute these interrupts to their legacy equivalent to
+ * get rid of spurious interrupts.
+ */
+ if (!noioapicquirk) {
+ switch (bridge_has_boot_interrupt_variant(bus)) {
+ case 0:
+ /* no rerouting necessary */
+ break;
+
+ case INTEL_IRQ_REROUTE_VARIANT:
+ /*
+ * Remap according to INTx routing table in 6700PXH
+ * specs, intel order number 302628-002, section
+ * 2.15.2. Other chipsets (80332, ...) have the same
+ * mapping and are handled here as well.
+ */
+ printk(KERN_INFO "pci irq %d -> rerouted to legacy "
+ "irq %d\n", ret, (ret % 4) + 16);
+ ret = (ret % 4) + 16;
+ break;
+
+ default:
+ printk(KERN_INFO "not rerouting irq %d to legacy irq: "
+ "unknown mapping\n", ret);
+ break;
+ }
+ }
+#endif /* CONFIG_X86_IO_APIC */
+
return ret;
}
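
The pci_irq.c hunk above reroutes boot interrupts on bridges such as the Intel 6700PXH: when the IO-APIC entry for an IRQ is masked, the bridge raises a legacy INTx instead, and the quirk maps the looked-up IRQ onto the legacy range 16..19 with (ret % 4) + 16. A minimal userspace sketch of that arithmetic, with hypothetical sample values, purely for illustration:

#include <stdio.h>

/* Sketch of the 6700PXH boot-interrupt remap used above: whatever
 * IO-APIC pin the routing table returned, the legacy equivalent is
 * one of IRQs 16..19, selected by (irq % 4) + 16. */
static int reroute_to_legacy(int irq)
{
	return (irq % 4) + 16;
}

int main(void)
{
	int samples[] = { 16, 17, 21, 34, 55 };   /* hypothetical PIRQ values */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pci irq %d -> legacy irq %d\n",
		       samples[i], reroute_to_legacy(samples[i]));
	return 0;
}
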
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 642554b1b60c..5b38a026d122 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -193,6 +194,7 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
unsigned long long value = 0;
acpi_handle handle = NULL;
struct acpi_device *child;
+ u32 flags, base_flags;
if (!device)
@@ -210,6 +212,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
device->ops.bind = acpi_pci_bind;
+ /*
+ * All supported architectures that use ACPI have support for
+ * PCI domains, so we indicate this in _OSC support capabilities.
+ */
+ flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ pci_acpi_osc_support(device->handle, flags);
+
/*
* Segment
* -------
@@ -335,6 +344,17 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
list_for_each_entry(child, &device->children, node)
acpi_pci_bridge_scan(child);
+ /* Indicate support for various _OSC capabilities. */
+ if (pci_ext_cfg_avail(root->bus->self))
+ flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
+ if (pcie_aspm_enabled())
+ flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
+ OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
+ if (pci_msi_enabled())
+ flags |= OSC_MSI_SUPPORT;
+ if (flags != base_flags)
+ pci_acpi_osc_support(device->handle, flags);
+
end:
if (result) {
if (!list_empty(&root->node))
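
The pci_root.c change reports _OSC support capabilities in two steps: PCI segment-group (domain) support is always announced, then optional capabilities are OR-ed in and the query is repeated only if anything was added. A condensed sketch of that flag-building flow, reusing the OSC_* names and helpers that appear in the hunks above (kernel context assumed, not a complete function):

	u32 flags, base_flags;

	/* Always claim PCI segment (domain) support ... */
	flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
	pci_acpi_osc_support(device->handle, flags);

	/* ... then add the optional capabilities this kernel provides. */
	if (pci_ext_cfg_avail(root->bus->self))
		flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
	if (pcie_aspm_enabled())
		flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
			 OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
	if (pci_msi_enabled())
		flags |= OSC_MSI_SUPPORT;

	/* A second _OSC call is only needed when something was added. */
	if (flags != base_flags)
		pci_acpi_osc_support(device->handle, flags);
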
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 34948362f41d..0cc2fd31e376 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
if (!pr)
return -ENOMEM;
+ if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+ kfree(pr);
+ return -ENOMEM;
+ }
+
pr->handle = device->handle;
strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
pr = acpi_driver_data(device);
- if (pr->id >= nr_cpu_ids) {
- kfree(pr);
- return 0;
- }
+ if (pr->id >= nr_cpu_ids)
+ goto free;
if (type == ACPI_BUS_REMOVAL_EJECT) {
if (acpi_processor_handle_eject(pr))
@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
per_cpu(processors, pr->id) = NULL;
per_cpu(processor_device_array, pr->id) = NULL;
+
+free:
+ free_cpumask_var(pr->throttling.shared_cpu_map);
kfree(pr);
return 0;
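
Throughout this merge, on-stack cpumask_t variables become cpumask_var_t, which must be explicitly allocated and freed (with CONFIG_CPUMASK_OFFSTACK the mask lives off-stack behind a pointer). The processor_core.c hunks show the ownership side of that pattern; a stripped-down sketch with hypothetical names, kernel context assumed:

#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo_state {
	cpumask_var_t shared_cpu_map;	/* was: cpumask_t */
};

static struct demo_state *demo_alloc(void)
{
	struct demo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return NULL;

	/* cpumask_var_t must be allocated before first use ... */
	if (!alloc_cpumask_var(&st->shared_cpu_map, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}
	return st;
}

static void demo_free(struct demo_state *st)
{
	/* ... and freed on every exit path, matching the goto-free change. */
	free_cpumask_var(st->shared_cpu_map);
	kfree(st);
}
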
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b81..66a9d8145562 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,6 +41,7 @@
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
+#include <linux/irqflags.h>
/*
* Include the apic definitions for x86 to have the APIC timer related defines
@@ -374,15 +375,15 @@ static int tsc_halts_in_c(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
+ case X86_VENDOR_INTEL:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
*/
- if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return 0;
+
/*FALL THROUGH*/
- case X86_VENDOR_INTEL:
- /* Several cases known where TSC halts in C2 too */
default:
return state > ACPI_STATE_C1;
}
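
The processor_idle.c hunk switches the "TSC keeps ticking in deep C-states" test from X86_FEATURE_CONSTANT_TSC to X86_FEATURE_NONSTOP_TSC and lets Intel share the AMD path. Outside the kernel the same property is advertised as the Invariant TSC bit (CPUID leaf 0x80000007, EDX bit 8); a small userspace probe, for illustration only:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID.80000007H:EDX[8] is the Invariant TSC bit, which the
	 * kernel exposes as X86_FEATURE_NONSTOP_TSC. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not available");
		return 1;
	}

	printf("Invariant TSC (nonstop across C/P states): %s\n",
	       (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}
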
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0d7b772bef50..846e227592d4 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
int count, count_target;
int retval = 0;
unsigned int i, j;
- cpumask_t covered_cpus;
+ cpumask_var_t covered_cpus;
struct acpi_processor *pr;
struct acpi_psd_package *pdomain;
struct acpi_processor *match_pr;
struct acpi_psd_package *match_pdomain;
+ if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
mutex_lock(&performance_mutex);
retval = 0;
@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
}
pr->performance = percpu_ptr(performance, i);
- cpu_set(i, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
if (acpi_processor_get_psd(pr)) {
retval = -EINVAL;
continue;
@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
}
}
- cpus_clear(covered_cpus);
+ cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
- if (cpu_isset(i, covered_cpus))
+ if (cpumask_test_cpu(i, covered_cpus))
continue;
pdomain = &(pr->performance->domain_info);
- cpu_set(i, pr->performance->shared_cpu_map);
- cpu_set(i, covered_cpus);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, covered_cpus);
if (pdomain->num_processors <= 1)
continue;
@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
goto err_ret;
}
- cpu_set(j, covered_cpus);
- cpu_set(j, pr->performance->shared_cpu_map);
+ cpumask_set_cpu(j, covered_cpus);
+ cpumask_set_cpu(j, pr->performance->shared_cpu_map);
count++;
}
@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
match_pr->performance->shared_type =
pr->performance->shared_type;
- match_pr->performance->shared_cpu_map =
- pr->performance->shared_cpu_map;
+ cpumask_copy(match_pr->performance->shared_cpu_map,
+ pr->performance->shared_cpu_map);
}
}
@@ -731,14 +734,15 @@ err_ret:
/* Assume no coordination on any error parsing domain info */
if (retval) {
- cpus_clear(pr->performance->shared_cpu_map);
- cpu_set(i, pr->performance->shared_cpu_map);
+ cpumask_clear(pr->performance->shared_cpu_map);
+ cpumask_set_cpu(i, pr->performance->shared_cpu_map);
pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
}
pr->performance = NULL; /* Will be set for real in register */
}
mutex_unlock(&performance_mutex);
+ free_cpumask_var(covered_cpus);
return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
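
Besides the allocation change, the perflib conversion replaces the old cpumask accessors with their pointer-based counterparts; in particular, plain struct assignment of masks becomes cpumask_copy(), since a cpumask_var_t may be only a pointer and assignment would no longer copy the bits. A fragment illustrating the renames (hypothetical helper, kernel context assumed):

#include <linux/cpumask.h>

static void demo_cpumask_ops(struct cpumask *covered, struct cpumask *dst,
			     const struct cpumask *src)
{
	int i;

	cpumask_clear(covered);			/* was: cpus_clear()           */
	cpumask_set_cpu(0, covered);		/* was: cpu_set()              */

	if (cpumask_test_cpu(0, covered))	/* was: cpu_isset()            */
		cpumask_copy(dst, src);		/* was: dst = src              */

	for_each_cpu(i, covered)		/* was: for_each_cpu_mask_nr() */
		cpumask_set_cpu(i, dst);
}
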
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0c38c94a8a0..d27838171f4a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
int count, count_target;
int retval = 0;
unsigned int i, j;
- cpumask_t covered_cpus;
+ cpumask_var_t covered_cpus;
struct acpi_processor *pr, *match_pr;
struct acpi_tsd_package *pdomain, *match_pdomain;
struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+ if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/*
* Now that we have _TSD data from all CPUs, lets setup T-state
* coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
if (retval)
goto err_ret;
- cpus_clear(covered_cpus);
+ cpumask_clear(covered_cpus);
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
continue;
- if (cpu_isset(i, covered_cpus))
+ if (cpumask_test_cpu(i, covered_cpus))
continue;
pthrottling = &pr->throttling;
pdomain = &(pthrottling->domain_info);
- cpu_set(i, pthrottling->shared_cpu_map);
- cpu_set(i, covered_cpus);
+ cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(i, covered_cpus);
/*
* If the number of processor in the TSD domain is 1, it is
* unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
goto err_ret;
}
- cpu_set(j, covered_cpus);
- cpu_set(j, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(j, covered_cpus);
+ cpumask_set_cpu(j, pthrottling->shared_cpu_map);
count++;
}
for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
* If some CPUS have the same domain, they
* will have the same shared_cpu_map.
*/
- match_pthrottling->shared_cpu_map =
- pthrottling->shared_cpu_map;
+ cpumask_copy(match_pthrottling->shared_cpu_map,
+ pthrottling->shared_cpu_map);
}
}
err_ret:
+ free_cpumask_var(covered_cpus);
+
for_each_possible_cpu(i) {
pr = per_cpu(processors, i);
if (!pr)
@@ -182,8 +187,8 @@ err_ret:
*/
if (retval) {
pthrottling = &(pr->throttling);
- cpus_clear(pthrottling->shared_cpu_map);
- cpu_set(i, pthrottling->shared_cpu_map);
+ cpumask_clear(pthrottling->shared_cpu_map);
+ cpumask_set_cpu(i, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 1;
pthrottling->shared_type = pdomain->coord_type;
- cpu_set(pr->id, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
/*
* If the coordination type is not defined in ACPI spec,
* the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
static int acpi_processor_get_throttling(struct acpi_processor *pr)
{
- cpumask_t saved_mask;
+ cpumask_var_t saved_mask;
int ret;
if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
+
+ if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ return -ENOMEM;
+
/*
* Migrate task to the cpu pointed by pr.
*/
- saved_mask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ cpumask_copy(saved_mask, &current->cpus_allowed);
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = pr->throttling.acpi_processor_get_throttling(pr);
/* restore the previous state */
- set_cpus_allowed_ptr(current, &saved_mask);
+ set_cpus_allowed_ptr(current, saved_mask);
+ free_cpumask_var(saved_mask);
return ret;
}
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
- cpumask_t saved_mask;
+ cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
struct throttling_tstate t_state;
- cpumask_t online_throttling_cpus;
+ cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- saved_mask = current->cpus_allowed;
+ if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+ free_cpumask_var(saved_mask);
+ return -ENOMEM;
+ }
+
+ cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpus_and(online_throttling_cpus, cpu_online_map,
- p_throttling->shared_cpu_map);
+ cpumask_and(online_throttling_cpus, cpu_online_mask,
+ p_throttling->shared_cpu_map);
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
continue;
}
t_state.cpu = i;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, cpumask_of(i));
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu_mask_nr(i, online_throttling_cpus) {
+ for_each_cpu(i, online_throttling_cpus) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
/* restore the previous state */
- set_cpus_allowed_ptr(current, &saved_mask);
+ /* FIXME: use work_on_cpu() */
+ set_cpus_allowed_ptr(current, saved_mask);
+ free_cpumask_var(online_throttling_cpus);
+ free_cpumask_var(saved_mask);
return ret;
}
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
if (acpi_processor_get_tsd(pr)) {
pthrottling = &pr->throttling;
pthrottling->tsd_valid_flag = 0;
- cpu_set(pr->id, pthrottling->shared_cpu_map);
+ cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}