author     Paul Mackerras <paulus@samba.org>	2007-04-24 11:46:09 +1000
committer  Paul Mackerras <paulus@samba.org>	2007-04-24 11:46:09 +1000
commit     b142eb3a5aa4f4779597b7a913c002287fa6ee08 (patch)
tree       e4d911c1cace545c766c3429f67d2b39356dbff5 /arch/powerpc/platforms
parent     13177c8b7eaf7ab238e79533c746153ae116f5f8 (diff)
parent     c6d344819ea26c4df1cf2572232706667e1d99ea (diff)
Merge branch 'for-2.6.22' of master.kernel.org:/pub/scm/linux/kernel/git/arnd/cell-2.6 into for-2.6.22
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c    81
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c      165
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.h        5
-rw-r--r--  arch/powerpc/platforms/cell/cbe_thermal.c   177
-rw-r--r--  arch/powerpc/platforms/cell/ras.c           160
5 files changed, 530 insertions, 58 deletions
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index 9c5d63b7e76c..4495973bff59 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -28,6 +28,8 @@
 #include <asm/processor.h>
 #include <asm/prom.h>
 #include <asm/time.h>
+#include <asm/pmi.h>
+#include <asm/of_platform.h>
 
 #include "cbe_regs.h"
 
@@ -68,6 +70,38 @@ static u64 MIC_Slow_Next_Timer_table[] = {
  * hardware specific functions
  */
 
+static struct of_device *pmi_dev;
+
+static int set_pmode_pmi(int cpu, unsigned int pmode)
+{
+	int ret;
+	pmi_message_t pmi_msg;
+#ifdef DEBUG
+	u64 time;
+#endif
+
+	pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+	pmi_msg.data1 = cbe_cpu_to_node(cpu);
+	pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+	time = (u64) get_cycles();
+#endif
+
+	pmi_send_message(pmi_dev, pmi_msg);
+	ret = pmi_msg.data2;
+
+	pr_debug("PMI returned slow mode %d\n", ret);
+
+#ifdef DEBUG
+	time = (u64) get_cycles() - time; /* actual cycles (not cpu cycles!) */
+	time = 1000000000 * time / CLOCK_TICK_RATE; /* time in ns (10^-9) */
+	pr_debug("had to wait %lu ns for a transition\n", time);
+#endif
+	return ret;
+}
+
+
 static int get_pmode(int cpu)
 {
 	int ret;
@@ -79,7 +113,7 @@ static int get_pmode(int cpu)
 	return ret;
 }
 
-static int set_pmode(int cpu, unsigned int pmode)
+static int set_pmode_reg(int cpu, unsigned int pmode)
 {
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
@@ -120,6 +154,39 @@ static int set_pmode(int cpu, unsigned int pmode)
 	return 0;
 }
 
+static int set_pmode(int cpu, unsigned int slow_mode) {
+	if(pmi_dev)
+		return set_pmode_pmi(cpu, slow_mode);
+	else
+		return set_pmode_reg(cpu, slow_mode);
+}
+
+static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg)
+{
+	struct cpufreq_policy policy;
+	u8 cpu;
+	u8 cbe_pmode_new;
+
+	BUG_ON (pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+	cpu = cbe_node_to_cpu(pmi_msg.data1);
+	cbe_pmode_new = pmi_msg.data2;
+
+	cpufreq_get_policy(&policy, cpu);
+
+	policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency);
+	policy.min = min(policy.min, policy.max);
+
+	pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max);
+	cpufreq_set_policy(&policy);
+}
+
+static struct pmi_handler cbe_pmi_handler = {
+	.type = PMI_TYPE_FREQ_CHANGE,
+	.handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
+
+
 /*
  * cpufreq functions
  */
@@ -234,11 +301,23 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
 
 static int __init cbe_cpufreq_init(void)
 {
+	struct device_node *np;
+
+	np = of_find_node_by_type(NULL, "ibm,pmi");
+
+	pmi_dev = of_find_device_by_node(np);
+
+	if (pmi_dev)
+		pmi_register_handler(pmi_dev, &cbe_pmi_handler);
+
 	return cpufreq_register_driver(&cbe_cpufreq_driver);
 }
 
 static void __exit cbe_cpufreq_exit(void)
 {
+	if(pmi_dev)
+		pmi_unregister_handler(pmi_dev, &cbe_pmi_handler);
+
 	cpufreq_unregister_driver(&cbe_cpufreq_driver);
 }
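
The DEBUG branch in set_pmode_pmi() above times the PMI round trip in timebase ticks and scales the result to nanoseconds with 1000000000 * time / CLOCK_TICK_RATE. A minimal userspace sketch of that scaling, using a made-up tick rate in place of the board's real CLOCK_TICK_RATE:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical tick rate for illustration only; the kernel code uses the
 * platform's CLOCK_TICK_RATE, which is board specific. */
#define EXAMPLE_TICK_RATE 14318000ULL

/* Same arithmetic as the DEBUG path in set_pmode_pmi(): elapsed ticks
 * scaled to nanoseconds (can overflow for very long intervals, exactly
 * as in the kernel version). */
static uint64_t ticks_to_ns(uint64_t ticks)
{
	return 1000000000ULL * ticks / EXAMPLE_TICK_RATE;
}

int main(void)
{
	uint64_t start = 1000, end = 15318;	/* made-up tick counts */

	printf("transition took %llu ns\n",
	       (unsigned long long)ticks_to_ns(end - start));
	return 0;
}
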
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 7c94af4ac439..12c9674b4b1f 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -14,6 +14,8 @@
 #include <asm/pgtable.h>
 #include <asm/prom.h>
 #include <asm/ptrace.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #include "cbe_regs.h"
 
@@ -27,6 +29,7 @@
 static struct cbe_regs_map
 {
 	struct device_node *cpu_node;
+	struct device_node *be_node;
 	struct cbe_pmd_regs __iomem *pmd_regs;
 	struct cbe_iic_regs __iomem *iic_regs;
 	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
@@ -37,30 +40,43 @@ static int cbe_regs_map_count;
 
 static struct cbe_thread_map
 {
 	struct device_node *cpu_node;
+	struct device_node *be_node;
 	struct cbe_regs_map *regs;
+	unsigned int thread_id;
+	unsigned int cbe_id;
 } cbe_thread_map[NR_CPUS];
 
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
+static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
 	int i;
 	struct device_node *tmp_np;
 
-	if (strcasecmp(np->type, "spe") == 0) {
-		if (np->data == NULL) {
-			/* walk up path until cpu node was found */
-			tmp_np = np->parent;
-			while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
-				tmp_np = tmp_np->parent;
+	if (strcasecmp(np->type, "spe")) {
+		for (i = 0; i < cbe_regs_map_count; i++)
+			if (cbe_regs_maps[i].cpu_node == np ||
+			    cbe_regs_maps[i].be_node == np)
+				return &cbe_regs_maps[i];
+		return NULL;
+	}
 
-			np->data = cbe_find_map(tmp_np);
-		}
+	if (np->data)
 		return np->data;
-	}
 
-	for (i = 0; i < cbe_regs_map_count; i++)
-		if (cbe_regs_maps[i].cpu_node == np)
-			return &cbe_regs_maps[i];
-	return NULL;
+	/* walk up path until cpu or be node was found */
+	tmp_np = np;
+	do {
+		tmp_np = tmp_np->parent;
+		/* on a correct devicetree we wont get up to root */
+		BUG_ON(!tmp_np);
+	} while (strcasecmp(tmp_np->type, "cpu") &&
+		 strcasecmp(tmp_np->type, "be"));
+
+	np->data = cbe_find_map(tmp_np);
+
+	return np->data;
 }
 
 struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
@@ -130,49 +146,69 @@ struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
 }
 EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
 
-/* FIXME
- * This is little more than a stub at the moment. It should be
- * fleshed out so that it works for both SMT and non-SMT, no
- * matter if the passed cpu is odd or even.
- * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
- * For SMT disabled, returns 0 for all cpus.
- */
 u32 cbe_get_hw_thread_id(int cpu)
 {
-	return (cpu & 1);
+	return cbe_thread_map[cpu].thread_id;
 }
 EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
 
-void __init cbe_regs_init(void)
+u32 cbe_cpu_to_node(int cpu)
 {
-	int i;
-	struct device_node *cpu;
+	return cbe_thread_map[cpu].cbe_id;
+}
+EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
 
-	/* Build local fast map of CPUs */
-	for_each_possible_cpu(i)
-		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
+u32 cbe_node_to_cpu(int node)
+{
+	return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+}
+EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
 
-	/* Find maps for each device tree CPU */
-	for_each_node_by_type(cpu, "cpu") {
-		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
+static struct device_node *cbe_get_be_node(int cpu_id)
+{
+	struct device_node *np;
+
+	for_each_node_by_type (np, "be") {
+		int len,i;
+		const phandle *cpu_handle;
+
+		cpu_handle = of_get_property(np, "cpus", &len);
+
+		for (i=0; i<len; i++)
+			if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
+				return np;
+	}
+
+	return NULL;
+}
+
+void __init cbe_fill_regs_map(struct cbe_regs_map *map)
+{
+	if(map->be_node) {
+		struct device_node *be, *np;
+
+		be = map->be_node;
+
+		for_each_node_by_type(np, "pervasive")
+			if (of_get_parent(np) == be)
+				map->pmd_regs = of_iomap(np, 0);
+
+		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
+			if (of_get_parent(np) == be)
+				map->iic_regs = of_iomap(np, 2);
+
+		for_each_node_by_type(np, "mic-tm")
+			if (of_get_parent(np) == be)
+				map->mic_tm_regs = of_iomap(np, 0);
+	} else {
+		struct device_node *cpu;
 		/* That hack must die die die ! */
 		const struct address_prop {
 			unsigned long address;
 			unsigned int len;
 		} __attribute__((packed)) *prop;
 
-		if (cbe_regs_map_count > MAX_CBE) {
-			printk(KERN_ERR "cbe_regs: More BE chips than supported"
-			       "!\n");
-			cbe_regs_map_count--;
-			return;
-		}
-		map->cpu_node = cpu;
-		for_each_possible_cpu(i)
-			if (cbe_thread_map[i].cpu_node == cpu)
-				cbe_thread_map[i].regs = map;
+		cpu = map->cpu_node;
 
 		prop = of_get_property(cpu, "pervasive", NULL);
 		if (prop != NULL)
@@ -188,3 +224,50 @@ void __init cbe_regs_init(void)
 	}
 }
 
+void __init cbe_regs_init(void)
+{
+	int i;
+	unsigned int thread_id;
+	struct device_node *cpu;
+
+	/* Build local fast map of CPUs */
+	for_each_possible_cpu(i) {
+		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
+		cbe_thread_map[i].be_node = cbe_get_be_node(i);
+		cbe_thread_map[i].thread_id = thread_id;
+	}
+
+	/* Find maps for each device tree CPU */
+	for_each_node_by_type(cpu, "cpu") {
+		struct cbe_regs_map *map;
+		unsigned int cbe_id;
+
+		cbe_id = cbe_regs_map_count++;
+		map = &cbe_regs_maps[cbe_id];
+
+		if (cbe_regs_map_count > MAX_CBE) {
+			printk(KERN_ERR "cbe_regs: More BE chips than supported"
+			       "!\n");
+			cbe_regs_map_count--;
+			return;
+		}
+		map->cpu_node = cpu;
+
+		for_each_possible_cpu(i) {
+			struct cbe_thread_map *thread = &cbe_thread_map[i];
+
+			if (thread->cpu_node == cpu) {
+				thread->regs = map;
+				thread->cbe_id = cbe_id;
+				map->be_node = thread->be_node;
+				cpu_set(i, cbe_local_mask[cbe_id]);
+				if(thread->thread_id == 0)
+					cpu_set(i, cbe_first_online_cpu);
+			}
+		}
+
+		cbe_fill_regs_map(map);
+	}
+}
+
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
index 440a7ecc66ea..17d597144877 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.h
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -255,6 +255,11 @@ struct cbe_mic_tm_regs {
 extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
 extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
 
+/* some utility functions to deal with SMT */
+extern u32 cbe_get_hw_thread_id(int cpu);
+extern u32 cbe_cpu_to_node(int cpu);
+extern u32 cbe_node_to_cpu(int node);
+
 /* Init this module early */
 extern void cbe_regs_init(void);
 
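
The helpers declared above map between logical CPUs and Cell BE chips: cbe_cpu_to_node() returns the cbe_id recorded in the thread map, and cbe_node_to_cpu() returns the first bit set in that node's cbe_local_mask, which cbe_regs_init() fills with cpu_set(). A small standalone sketch of the same idea using plain 64-bit masks (names and sizes are illustrative, not the kernel's cpumask API):

#include <stdio.h>
#include <stdint.h>

#define MAX_NODES 4
#define MAX_CPUS  64

/* Illustrative stand-ins for cbe_thread_map[].cbe_id and cbe_local_mask[] */
static unsigned int cpu_to_node_tbl[MAX_CPUS];
static uint64_t node_cpu_mask[MAX_NODES];

static void register_cpu(unsigned int cpu, unsigned int node)
{
	cpu_to_node_tbl[cpu] = node;
	node_cpu_mask[node] |= 1ULL << cpu;	/* like cpu_set(i, cbe_local_mask[cbe_id]) */
}

/* lowest set bit == first CPU on that node, like find_first_bit() */
static int node_to_first_cpu(unsigned int node)
{
	uint64_t mask = node_cpu_mask[node];
	return mask ? __builtin_ctzll(mask) : -1;
}

int main(void)
{
	/* two chips, two hardware threads each */
	register_cpu(0, 0); register_cpu(1, 0);
	register_cpu(2, 1); register_cpu(3, 1);

	printf("cpu 3 -> node %u\n", cpu_to_node_tbl[3]);		/* 1 */
	printf("node 1 -> first cpu %d\n", node_to_first_cpu(1));	/* 2 */
	return 0;
}
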
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index e8bcd2a767ce..f370f0fa6f4c 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -1,6 +1,31 @@
 /*
  * thermal support for the cell processor
  *
+ * This module adds some sysfs attributes to cpu and spu nodes.
+ * Base for measurements are the digital thermal sensors (DTS)
+ * located on the chip.
+ * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius
+ * The attributes can be found under
+ * /sys/devices/system/cpu/cpuX/thermal
+ * /sys/devices/system/spu/spuX/thermal
+ *
+ * The following attributes are added for each node:
+ * temperature:
+ *	contains the current temperature measured by the DTS
+ * throttle_begin:
+ *	throttling begins when temperature is greater or equal to
+ *	throttle_begin. Setting this value to 125 prevents throttling.
+ * throttle_end:
+ *	throttling is being ceased, if the temperature is lower than
+ *	throttle_end. Due to a delay between applying throttling and
+ *	a reduced temperature this value should be less than throttle_begin.
+ *	A value equal to throttle_begin provides only a very little hysteresis.
+ * throttle_full_stop:
+ *	If the temperatrue is greater or equal to throttle_full_stop,
+ *	full throttling is applied to the cpu or spu. This value should be
+ *	greater than throttle_begin and throttle_end. Setting this value to
+ *	65 prevents the unit from running code at all.
+ *
  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
  *
  * Author: Christian Krafft <krafft@de.ibm.com>
@@ -31,6 +56,26 @@
 #include "cbe_regs.h"
 #include "spu_priv1_mmio.h"
 
+#define TEMP_MIN 65
+#define TEMP_MAX 125
+
+#define SYSDEV_PREFIX_ATTR(_prefix,_name,_mode)			\
+struct sysdev_attribute attr_ ## _prefix ## _ ## _name = {	\
+	.attr = { .name = __stringify(_name), .mode = _mode },	\
+	.show	= _prefix ## _show_ ## _name,			\
+	.store	= _prefix ## _store_ ## _name,			\
+};
+
+static inline u8 reg_to_temp(u8 reg_value)
+{
+	return ((reg_value & 0x3f) << 1) + TEMP_MIN;
+}
+
+static inline u8 temp_to_reg(u8 temp)
+{
+	return ((temp - TEMP_MIN) >> 1) & 0x3f;
+}
+
 static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
 {
 	struct spu *spu;
@@ -58,20 +103,81 @@ static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
 
 static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
 {
-	int value;
+	u8 value;
 	struct cbe_pmd_regs __iomem *pmd_regs;
 
 	pmd_regs = get_pmd_regs(sysdev);
 
 	value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
-	/* clear all other bits */
+
+	return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos)
+{
+	u64 value;
+
+	value = in_be64(&pmd_regs->tm_tpr.val);
+	/* access the corresponding byte */
+	value >>= pos;
 	value &= 0x3F;
-	/* temp is stored in steps of 2 degrees */
-	value *= 2;
-	/* base temp is 65 degrees */
-	value += 65;
 
-	return sprintf(buf, "%d\n", (int) value);
+	return sprintf(buf, "%d\n", reg_to_temp(value));
+}
+
+static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
+{
+	u64 reg_value;
+	int temp;
+	u64 new_value;
+	int ret;
+
+	ret = sscanf(buf, "%u", &temp);
+
+	if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX)
+		return -EINVAL;
+
+	new_value = temp_to_reg(temp);
+
+	reg_value = in_be64(&pmd_regs->tm_tpr.val);
+
+	/* zero out bits for new value */
+	reg_value &= ~(0xffull << pos);
+	/* set bits to new value */
+	reg_value |= new_value << pos;
+
+	out_be64(&pmd_regs->tm_tpr.val, reg_value);
+	return size;
+}
+
+static ssize_t spu_show_throttle_end(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(get_pmd_regs(sysdev), buf, 0);
+}
+
+static ssize_t spu_show_throttle_begin(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(get_pmd_regs(sysdev), buf, 8);
+}
+
+static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(get_pmd_regs(sysdev), buf, 16);
+}
+
+static ssize_t spu_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(get_pmd_regs(sysdev), buf, size, 0);
+}
+
+static ssize_t spu_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(get_pmd_regs(sysdev), buf, size, 8);
+}
+
+static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(get_pmd_regs(sysdev), buf, size, 16);
 }
 
 static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
@@ -82,16 +188,9 @@ static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
 	pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
 	value = in_be64(&pmd_regs->ts_ctsr2);
 
-	/* access the corresponding byte */
-	value >>= pos;
-	/* clear all other bits */
-	value &= 0x3F;
-	/* temp is stored in steps of 2 degrees */
-	value *= 2;
-	/* base temp is 65 degrees */
-	value += 65;
+	value = (value >> pos) & 0x3f;
 
-	return sprintf(buf, "%d\n", (int) value);
+	return sprintf(buf, "%d\n", reg_to_temp(value));
 }
 
 
@@ -108,13 +207,52 @@ static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
 	return ppe_show_temp(sysdev, buf, 0);
 }
 
+static ssize_t ppe_show_throttle_end(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32);
+}
+
+static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40);
+}
+
+static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev, char *buf)
+{
+	return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48);
+}
+
+static ssize_t ppe_store_throttle_end(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32);
+}
+
+static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40);
+}
+
+static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev, const char *buf, size_t size)
+{
+	return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48);
+}
+
+
 static struct sysdev_attribute attr_spu_temperature = {
 	.attr = {.name = "temperature", .mode = 0400 },
 	.show = spu_show_temp,
 };
 
+static SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600);
+static SYSDEV_PREFIX_ATTR(spu, throttle_begin, 0600);
+static SYSDEV_PREFIX_ATTR(spu, throttle_full_stop, 0600);
+
+
 static struct attribute *spu_attributes[] = {
 	&attr_spu_temperature.attr,
+	&attr_spu_throttle_end.attr,
+	&attr_spu_throttle_begin.attr,
+	&attr_spu_throttle_full_stop.attr,
 	NULL,
 };
 
@@ -133,9 +271,16 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
 	.show = ppe_show_temp1,
 };
 
+static SYSDEV_PREFIX_ATTR(ppe, throttle_end, 0600);
+static SYSDEV_PREFIX_ATTR(ppe, throttle_begin, 0600);
+static SYSDEV_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
+
 static struct attribute *ppe_attributes[] = {
 	&attr_ppe_temperature0.attr,
 	&attr_ppe_temperature1.attr,
+	&attr_ppe_throttle_end.attr,
+	&attr_ppe_throttle_begin.attr,
+	&attr_ppe_throttle_full_stop.attr,
 	NULL,
 };
 
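
The new reg_to_temp()/temp_to_reg() helpers encode the DTS format described in the header comment: 6-bit register values in 2-degree steps starting at 65 degrees celsius. A standalone sketch of the same conversion, just to illustrate the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define TEMP_MIN 65	/* degrees celsius, lowest encodable value */

/* same arithmetic as the kernel's reg_to_temp()/temp_to_reg() */
static uint8_t reg_to_temp(uint8_t reg_value)
{
	return ((reg_value & 0x3f) << 1) + TEMP_MIN;
}

static uint8_t temp_to_reg(uint8_t temp)
{
	return ((temp - TEMP_MIN) >> 1) & 0x3f;
}

int main(void)
{
	/* 0x00 -> 65, 0x01 -> 67, ... 0x1e -> 125; the documented sensor
	 * range ends at 125 degrees */
	printf("reg 0x00 -> %u C\n", reg_to_temp(0x00));
	printf("reg 0x1e -> %u C\n", reg_to_temp(0x1e));
	printf("85 C -> reg 0x%02x\n", temp_to_reg(85));
	return 0;
}
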
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 0984c7071695..b5ebc916388b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -3,11 +3,13 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/smp.h>
+#include <linux/reboot.h>
 
 #include <asm/reg.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
+#include <asm/rtas.h>
 
 #include "ras.h"
 #include "cbe_regs.h"
@@ -82,6 +84,164 @@ static int cbe_machine_check_handler(struct pt_regs *regs)
 	return 0;
 }
 
+struct ptcal_area {
+	struct list_head list;
+	int nid;
+	int order;
+	struct page *pages;
+};
+
+static LIST_HEAD(ptcal_list);
+
+static int ptcal_start_tok, ptcal_stop_tok;
+
+static int __init cbe_ptcal_enable_on_node(int nid, int order)
+{
+	struct ptcal_area *area;
+	int ret = -ENOMEM;
+	unsigned long addr;
+
+#ifdef CONFIG_CRASH_DUMP
+	rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
+#endif
+
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		goto out_err;
+
+	area->nid = nid;
+	area->order = order;
+	area->pages = alloc_pages_node(area->nid, GFP_KERNEL, area->order);
+
+	if (!area->pages)
+		goto out_free_area;
+
+	addr = __pa(page_address(area->pages));
+
+	ret = -EIO;
+	if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid,
+				(unsigned int)(addr >> 32),
+				(unsigned int)(addr & 0xffffffff))) {
+		printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n",
+				__FUNCTION__, nid);
+		goto out_free_pages;
+	}
+
+	list_add(&area->list, &ptcal_list);
+
+	return 0;
+
+out_free_pages:
+	__free_pages(area->pages, area->order);
+out_free_area:
+	kfree(area);
+out_err:
+	return ret;
+}
+
+static int __init cbe_ptcal_enable(void)
+{
+	const u32 *size;
+	struct device_node *np;
+	int order, found_mic = 0;
+
+	np = of_find_node_by_path("/rtas");
+	if (!np)
+		return -ENODEV;
+
+	size = get_property(np, "ibm,cbe-ptcal-size", NULL);
+	if (!size)
+		return -ENODEV;
+
+	pr_debug("%s: enabling PTCAL, size = 0x%x\n", __FUNCTION__, *size);
+	order = get_order(*size);
+	of_node_put(np);
+
+	/* support for malta device trees, with be@/mic@ nodes */
+	for_each_node_by_type(np, "mic-tm") {
+		cbe_ptcal_enable_on_node(of_node_to_nid(np), order);
+		found_mic = 1;
+	}
+
+	if (found_mic)
+		return 0;
+
+	/* support for older device tree - use cpu nodes */
+	for_each_node_by_type(np, "cpu") {
+		const u32 *nid = get_property(np, "node-id", NULL);
+		if (!nid) {
+			printk(KERN_ERR "%s: node %s is missing node-id?\n",
+					__FUNCTION__, np->full_name);
+			continue;
+		}
+		cbe_ptcal_enable_on_node(*nid, order);
+		found_mic = 1;
+	}
+
+	return found_mic ? 0 : -ENODEV;
+}
+
+static int cbe_ptcal_disable(void)
+{
+	struct ptcal_area *area, *tmp;
+	int ret = 0;
+
+	pr_debug("%s: disabling PTCAL\n", __FUNCTION__);
+
+	list_for_each_entry_safe(area, tmp, &ptcal_list, list) {
+		/* disable ptcal on this node */
+		if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) {
+			printk(KERN_ERR "%s: error disabling PTCAL "
+					"on node %d!\n", __FUNCTION__,
+					area->nid);
+			ret = -EIO;
+			continue;
+		}
+
+		/* ensure we can access the PTCAL area */
+		memset(page_address(area->pages), 0,
+				1 << (area->order + PAGE_SHIFT));
+
+		/* clean up */
+		list_del(&area->list);
+		__free_pages(area->pages, area->order);
+		kfree(area);
+	}
+
+	return ret;
+}
+
+static int cbe_ptcal_notify_reboot(struct notifier_block *nb,
+		unsigned long code, void *data)
+{
+	return cbe_ptcal_disable();
+}
+
+static struct notifier_block cbe_ptcal_reboot_notifier = {
+	.notifier_call = cbe_ptcal_notify_reboot
+};
+
+int __init cbe_ptcal_init(void)
+{
+	int ret;
+	ptcal_start_tok = rtas_token("ibm,cbe-start-ptcal");
+	ptcal_stop_tok = rtas_token("ibm,cbe-stop-ptcal");
+
+	if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE
+			|| ptcal_stop_tok == RTAS_UNKNOWN_SERVICE)
+		return -ENODEV;
+
+	ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier);
+	if (ret) {
+		printk(KERN_ERR "Can't disable PTCAL, so not enabling\n");
+		return ret;
+	}
+
+	return cbe_ptcal_enable();
+}
+
+arch_initcall(cbe_ptcal_init);
+
 void __init cbe_ras_init(void)
 {
 	unsigned long hid0;
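
cbe_ptcal_enable_on_node() hands the physical address of the PTCAL area to RTAS as two 32-bit arguments, high word first, because each rtas_call() argument is a 32-bit cell. A minimal standalone sketch of that split and its inverse (the reassembly shown here is just the arithmetic; the actual RTAS service is firmware specific):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up physical address standing in for __pa(page_address(...)) */
	uint64_t addr = 0x0000000123456000ULL;

	/* same split as the rtas_call() arguments in cbe_ptcal_enable_on_node() */
	uint32_t hi = (uint32_t)(addr >> 32);
	uint32_t lo = (uint32_t)(addr & 0xffffffff);

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);

	/* the receiving side would rebuild the address like this */
	uint64_t rebuilt = ((uint64_t)hi << 32) | lo;
	printf("rebuilt=0x%016llx (matches: %d)\n",
	       (unsigned long long)rebuilt, rebuilt == addr);
	return 0;
}
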