Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/misc/cxl/cxl.h    |  15
-rw-r--r-- | drivers/misc/cxl/fault.c  |   8
-rw-r--r-- | drivers/misc/cxl/irq.c    | 144
-rw-r--r-- | drivers/misc/cxl/native.c |  14
-rw-r--r-- | drivers/rtc/Kconfig       |  11
-rw-r--r-- | drivers/rtc/Makefile      |   1
-rw-r--r-- | drivers/rtc/rtc-opal.c    | 261
7 files changed, 414 insertions, 40 deletions
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 3d2b8677ec8a..b5b6bda44a00 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -336,6 +336,8 @@ struct cxl_sste {
 struct cxl_afu {
 	irq_hw_number_t psl_hwirq;
 	irq_hw_number_t serr_hwirq;
+	char *err_irq_name;
+	char *psl_irq_name;
 	unsigned int serr_virq;
 	void __iomem *p1n_mmio;
 	void __iomem *p2n_mmio;
@@ -379,6 +381,12 @@ struct cxl_afu {
 	bool enabled;
 };
 
+
+struct cxl_irq_name {
+	struct list_head list;
+	char *name;
+};
+
 /*
  * This is a cxl context. If the PSL is in dedicated mode, there will be one
  * of these per AFU. If in AFU directed there can be lots of these.
@@ -403,6 +411,7 @@
 
 	unsigned long *irq_bitmap; /* Accessed from IRQ context */
 	struct cxl_irq_ranges irqs;
+	struct list_head irq_names;
 	u64 fault_addr;
 	u64 fault_dsisr;
 	u64 afu_err;
@@ -444,6 +453,7 @@ struct cxl {
 	struct dentry *trace;
 	struct dentry *psl_err_chk;
 	struct dentry *debugfs;
+	char *irq_name;
 	struct bin_attribute cxl_attr;
 	int adapter_num;
 	int user_irqs;
@@ -563,9 +573,6 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
 int cxl_afu_deactivate_mode(struct cxl_afu *afu);
 int cxl_afu_select_best_mode(struct cxl_afu *afu);
 
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
-			 irq_handler_t handler, void *cookie);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
 int cxl_register_psl_irq(struct cxl_afu *afu);
 void cxl_release_psl_irq(struct cxl_afu *afu);
 int cxl_register_psl_err_irq(struct cxl *adapter);
@@ -612,7 +619,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr);
 int cxl_detach_process(struct cxl_context *ctx);
 
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info);
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
 int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
 int cxl_check_error(struct cxl_afu *afu);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index c99e896604ee..f8684bca2d79 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -133,7 +133,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 {
 	unsigned flt = 0;
 	int result;
-	unsigned long access, flags;
+	unsigned long access, flags, inv_flags = 0;
 
 	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
 		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
@@ -149,8 +149,12 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 		access |= _PAGE_RW;
 	if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
 		access |= _PAGE_USER;
+
+	if (dsisr & DSISR_NOHPTE)
+		inv_flags |= HPTE_NOHPTE_UPDATE;
+
 	local_irq_save(flags);
-	hash_page_mm(mm, dar, access, 0x300);
+	hash_page_mm(mm, dar, access, 0x300, inv_flags);
 	local_irq_restore(flags);
 
 	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 336020c8e1af..c294925f73ee 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -92,20 +92,13 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t cxl_irq(int irq, void *data)
+static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 {
 	struct cxl_context *ctx = data;
-	struct cxl_irq_info irq_info;
 	u64 dsisr, dar;
-	int result;
-
-	if ((result = cxl_get_irq(ctx, &irq_info))) {
-		WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
-		return IRQ_HANDLED;
-	}
 
-	dsisr = irq_info.dsisr;
-	dar = irq_info.dar;
+	dsisr = irq_info->dsisr;
+	dar = irq_info->dar;
 
 	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n",
 		 irq, ctx->pe, dsisr, dar);
@@ -149,9 +142,9 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	if (dsisr & CXL_PSL_DSISR_An_UR)
 		pr_devel("CXL interrupt: AURP PTE not found\n");
 	if (dsisr & CXL_PSL_DSISR_An_PE)
-		return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
+		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err);
+		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -163,10 +156,10 @@ static irqreturn_t cxl_irq(int irq, void *data)
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
 					    "undelivered to pe %i: %.llx\n",
-					    ctx->pe, irq_info.afu_err);
+					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
-			ctx->afu_err = irq_info.afu_err;
+			ctx->afu_err = irq_info->afu_err;
 			ctx->pending_afu_err = 1;
 			spin_unlock(&ctx->lock);
 
@@ -182,24 +175,43 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+	else
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
 {
 	struct cxl_afu *afu = data;
 	struct cxl_context *ctx;
+	struct cxl_irq_info irq_info;
 	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
 	int ret;
 
+	if ((ret = cxl_get_irq(afu, &irq_info))) {
+		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+		return fail_psl_irq(afu, &irq_info);
+	}
+
 	rcu_read_lock();
 	ctx = idr_find(&afu->contexts_idr, ph);
 	if (ctx) {
-		ret = cxl_irq(irq, ctx);
+		ret = cxl_irq(irq, ctx, &irq_info);
 		rcu_read_unlock();
 		return ret;
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
-	return IRQ_HANDLED;
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
+		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+		" with outstanding transactions?)\n", ph, irq_info.dsisr,
+		irq_info.dar);
+	return fail_psl_irq(afu, &irq_info);
 }
 
 static irqreturn_t cxl_irq_afu(int irq, void *data)
@@ -243,7 +255,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
 }
 
 unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
-			 irq_handler_t handler, void *cookie)
+			 irq_handler_t handler, void *cookie, const char *name)
 {
 	unsigned int virq;
 	int result;
@@ -259,7 +271,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
 	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
 
-	result = request_irq(virq, handler, 0, "cxl", cookie);
+	result = request_irq(virq, handler, 0, name, cookie);
 	if (result) {
 		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
 		return 0;
 	}
@@ -278,14 +290,15 @@ static int cxl_register_one_irq(struct cxl *adapter,
 				irq_handler_t handler,
 				void *cookie,
 				irq_hw_number_t *dest_hwirq,
-				unsigned int *dest_virq)
+				unsigned int *dest_virq,
+				const char *name)
 {
 	int hwirq, virq;
 
 	if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
 		return hwirq;
 
-	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
+	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
 		goto err;
 
 	*dest_hwirq = hwirq;
@@ -302,10 +315,19 @@ int cxl_register_psl_err_irq(struct cxl *adapter)
 {
 	int rc;
 
+	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+				      dev_name(&adapter->dev));
+	if (!adapter->irq_name)
+		return -ENOMEM;
+
 	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
 				       &adapter->err_hwirq,
-				       &adapter->err_virq)))
+				       &adapter->err_virq,
+				       adapter->irq_name))) {
+		kfree(adapter->irq_name);
+		adapter->irq_name = NULL;
 		return rc;
+	}
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
@@ -317,6 +339,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter)
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->err_virq, adapter);
 	cxl_release_one_irq(adapter, adapter->err_hwirq);
+	kfree(adapter->irq_name);
 }
 
 int cxl_register_serr_irq(struct cxl_afu *afu)
@@ -324,10 +347,18 @@ int cxl_register_serr_irq(struct cxl_afu *afu)
 	u64 serr;
 	int rc;
 
+	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+				      dev_name(&afu->dev));
+	if (!afu->err_irq_name)
+		return -ENOMEM;
+
 	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
 				       &afu->serr_hwirq,
-				       &afu->serr_virq)))
+				       &afu->serr_virq, afu->err_irq_name))) {
+		kfree(afu->err_irq_name);
+		afu->err_irq_name = NULL;
 		return rc;
+	}
 
 	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
 	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
@@ -341,24 +372,50 @@ void cxl_release_serr_irq(struct cxl_afu *afu)
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
+	kfree(afu->err_irq_name);
 }
 
 int cxl_register_psl_irq(struct cxl_afu *afu)
 {
-	return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
-				    &afu->psl_hwirq, &afu->psl_virq);
+	int rc;
+
+	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
+				      dev_name(&afu->dev));
+	if (!afu->psl_irq_name)
+		return -ENOMEM;
+
+	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
+				       &afu->psl_hwirq, &afu->psl_virq,
+				       afu->psl_irq_name))) {
+		kfree(afu->psl_irq_name);
+		afu->psl_irq_name = NULL;
+	}
+	return rc;
 }
 
 void cxl_release_psl_irq(struct cxl_afu *afu)
 {
 	cxl_unmap_irq(afu->psl_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
+	kfree(afu->psl_irq_name);
+}
+
+void afu_irq_name_free(struct cxl_context *ctx)
+{
+	struct cxl_irq_name *irq_name, *tmp;
+
+	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
+		kfree(irq_name->name);
+		list_del(&irq_name->list);
+		kfree(irq_name);
+	}
 }
 
 int afu_register_irqs(struct cxl_context *ctx, u32 count)
 {
 	irq_hw_number_t hwirq;
-	int rc, r, i;
+	int rc, r, i, j = 1;
+	struct cxl_irq_name *irq_name;
 
 	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 		return rc;
@@ -372,15 +429,47 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
 			  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 	if (!ctx->irq_bitmap)
 		return -ENOMEM;
+
+	/*
+	 * Allocate names first. If any fail, bail out before allocating
+	 * actual hardware IRQs.
+	 */
+	INIT_LIST_HEAD(&ctx->irq_names);
+	for (r = 1; r < CXL_IRQ_RANGES; r++) {
+		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+			irq_name = kmalloc(sizeof(struct cxl_irq_name),
+					   GFP_KERNEL);
+			if (!irq_name)
+				goto out;
+			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
+						   dev_name(&ctx->afu->dev),
+						   ctx->pe, j);
+			if (!irq_name->name) {
+				kfree(irq_name);
+				goto out;
+			}
+			/* Add to tail so next look get the correct order */
+			list_add_tail(&irq_name->list, &ctx->irq_names);
+			j++;
+		}
+	}
+
+	/* We've allocated all memory now, so let's do the irq allocations */
+	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
 		hwirq = ctx->irqs.offset[r];
 		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
 			cxl_map_irq(ctx->afu->adapter, hwirq,
-				    cxl_irq_afu, ctx);
+				    cxl_irq_afu, ctx, irq_name->name);
+			irq_name = list_next_entry(irq_name, list);
 		}
 	}
 
 	return 0;
+
+out:
+	afu_irq_name_free(ctx);
+	return -ENOMEM;
 }
 
 void afu_release_irqs(struct cxl_context *ctx)
@@ -398,5 +487,6 @@
 		}
 	}
 
+	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 }
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d47532e8f4f1..9a5a442269a8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -637,18 +637,18 @@ int cxl_detach_process(struct cxl_context *ctx)
 	return detach_process_native_afu_directed(ctx);
 }
 
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info)
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
 {
 	u64 pidtid;
 
-	info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
-	info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An);
-	info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An);
-	pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An);
+	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
+	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
+	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
 	info->pid = pidtid >> 32;
 	info->tid = pidtid & 0xffffffff;
-	info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An);
-	info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An);
+	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 
 	return 0;
 }
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4511ddc1ac31..f15cddfeb897 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -987,6 +987,17 @@ config RTC_DRV_NUC900
 	  If you say yes here you get support for the RTC subsystem of the
 	  NUC910/NUC920 used in embedded systems.
 
+config RTC_DRV_OPAL
+	tristate "IBM OPAL RTC driver"
+	depends on PPC_POWERNV
+	default y
+	help
+	  If you say yes here you get support for the PowerNV platform RTC
+	  driver based on OPAL interfaces.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-opal.
+
 comment "on-CPU RTC drivers"
 
 config RTC_DRV_DAVINCI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b188323c096a..c8ef3e1e6ccd 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_MSM6242)	+= rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MPC5121)	+= rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_MV)	+= rtc-mv.o
 obj-$(CONFIG_RTC_DRV_NUC900)	+= rtc-nuc900.o
+obj-$(CONFIG_RTC_DRV_OPAL)	+= rtc-opal.o
 obj-$(CONFIG_RTC_DRV_OMAP)	+= rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PALMAS)	+= rtc-palmas.o
 obj-$(CONFIG_RTC_DRV_PCAP)	+= rtc-pcap.o
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
new file mode 100644
index 000000000000..95f652165fe9
--- /dev/null
+++ b/drivers/rtc/rtc-opal.c
@@ -0,0 +1,261 @@
+/*
+ * IBM OPAL RTC driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#define DRVNAME		"rtc-opal"
+#define pr_fmt(fmt)	DRVNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+{
+	tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
+		       bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
+	tm->tm_mon  = bcd2bin((y_m_d >> 8) & 0xff) - 1;
+	tm->tm_mday = bcd2bin(y_m_d & 0xff);
+	tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
+	tm->tm_min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
+	tm->tm_sec  = bcd2bin((h_m_s_ms >> 40) & 0xff);
+
+	GregorianDay(tm);
+}
+
+static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+{
+	*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
+	*y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
+	*y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
+	*y_m_d |= ((u32)bin2bcd(tm->tm_mday));
+
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
+	*h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
+}
+
+static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+	long rc = OPAL_BUSY;
+	u32 y_m_d;
+	u64 h_m_s_ms;
+	__be32 __y_m_d;
+	__be64 __h_m_s_ms;
+
+	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);
+		else
+			msleep(10);
+	}
+
+	if (rc != OPAL_SUCCESS)
+		return -EIO;
+
+	y_m_d = be32_to_cpu(__y_m_d);
+	h_m_s_ms = be64_to_cpu(__h_m_s_ms);
+	opal_to_tm(y_m_d, h_m_s_ms, tm);
+
+	return 0;
+}
+
+static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+{
+	long rc = OPAL_BUSY;
+	u32 y_m_d = 0;
+	u64 h_m_s_ms = 0;
+
+	tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+		rc = opal_rtc_write(y_m_d, h_m_s_ms);
+		if (rc == OPAL_BUSY_EVENT)
+			opal_poll_events(NULL);
+		else
+			msleep(10);
+	}
+
+	return rc == OPAL_SUCCESS ? 0 : -EIO;
+}
+
+/*
+ * TPO	Timed Power-On
+ *
+ * TPO get/set OPAL calls care about the hour and min and to make it consistent
+ * with the rtc utility time conversion functions, we use the 'u64' to store
+ * its value and perform bit shift by 32 before use..
+ */
+static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	__be32 __y_m_d, __h_m;
+	struct opal_msg msg;
+	int rc, token;
+	u64 h_m_s_ms;
+	u32 y_m_d;
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			pr_err("Failed to get the async token\n");
+
+		return token;
+	}
+
+	rc = opal_tpo_read(token, &__y_m_d, &__h_m);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	y_m_d = be32_to_cpu(__y_m_d);
+	h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
+	opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
+
+exit:
+	opal_async_release_token(token);
+	return rc;
+}
+
+/* Set Timed Power-On */
+static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+{
+	u64 h_m_s_ms = 0, token;
+	struct opal_msg msg;
+	u32 y_m_d = 0;
+	int rc;
+
+	tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
+
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			pr_err("Failed to get the async token\n");
+
+		return token;
+	}
+
+	/* TPO, we care about hour and minute */
+	rc = opal_tpo_write(token, y_m_d,
+			    (u32)((h_m_s_ms >> 32) & 0xffff0000));
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		rc = -EIO;
+		goto exit;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS)
+		rc = -EIO;
+
+exit:
+	opal_async_release_token(token);
+	return rc;
+}
+
+static const struct rtc_class_ops opal_rtc_ops = {
+	.read_time	= opal_get_rtc_time,
+	.set_time	= opal_set_rtc_time,
+	.read_alarm	= opal_get_tpo_time,
+	.set_alarm	= opal_set_tpo_time,
+};
+
+static int opal_rtc_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+
+	if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
+						 NULL))
+		device_set_wakeup_capable(&pdev->dev, true);
+
+	rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
+				       THIS_MODULE);
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
+
+	rtc->uie_unsupported = 1;
+
+	return 0;
+}
+
+static const struct of_device_id opal_rtc_match[] = {
+	{
+		.compatible	= "ibm,opal-rtc",
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, opal_rtc_match);
+
+static const struct platform_device_id opal_rtc_driver_ids[] = {
+	{
+		.name		= "opal-rtc",
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids);
+
+static struct platform_driver opal_rtc_driver = {
+	.probe	= opal_rtc_probe,
+	.id_table = opal_rtc_driver_ids,
+	.driver	= {
+		.name		= DRVNAME,
+		.owner		= THIS_MODULE,
+		.of_match_table	= opal_rtc_match,
+	},
+};
+
+static int __init opal_rtc_init(void)
+{
+	if (!firmware_has_feature(FW_FEATURE_OPAL))
+		return -ENODEV;
+
+	return platform_driver_register(&opal_rtc_driver);
+}
+
+static void __exit opal_rtc_exit(void)
+{
+	platform_driver_unregister(&opal_rtc_driver);
+}
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(opal_rtc_init);
+module_exit(opal_rtc_exit);
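
The cxl portion of this diff exists to give each PSL/AFU interrupt a descriptive name, so request_irq() registers something more useful than the old flat "cxl" label; these names are what appear in /proc/interrupts. The sketch below is illustration only, not part of the patch: it rebuilds the four name formats in userspace with snprintf() instead of kasprintf(), and the device names "card0" and "afu0.0" are hypothetical placeholders for dev_name(&adapter->dev) and dev_name(&afu->dev).

#include <stdio.h>

int main(void)
{
	char name[64];
	int pe = 3;

	/* Adapter-wide PSL error interrupt: "cxl-%s-err" */
	snprintf(name, sizeof(name), "cxl-%s-err", "card0");
	printf("%s\n", name);			/* cxl-card0-err */

	/* Per-AFU slice error interrupt: "cxl-%s-err" */
	snprintf(name, sizeof(name), "cxl-%s-err", "afu0.0");
	printf("%s\n", name);			/* cxl-afu0.0-err */

	/* Multiplexed PSL interrupt for an AFU: "cxl-%s" */
	snprintf(name, sizeof(name), "cxl-%s", "afu0.0");
	printf("%s\n", name);			/* cxl-afu0.0 */

	/* Per-context AFU interrupts: "cxl-%s-pe%i-%i", numbered from 1
	 * (j = 1 in afu_register_irqs() above). */
	for (int i = 1; i <= 2; i++) {
		snprintf(name, sizeof(name), "cxl-%s-pe%i-%i", "afu0.0", pe, i);
		printf("%s\n", name);		/* cxl-afu0.0-pe3-1, cxl-afu0.0-pe3-2 */
	}
	return 0;
}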
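
The OPAL interface used by rtc-opal.c encodes the date as BCD packed into a u32 (century, year, month, day, one byte each from the top) and the time of day into the high bytes of a u64 (hour, minute, second), which is what opal_to_tm() and tm_to_opal() translate to and from struct rtc_time. The following standalone userspace sketch is not part of the patch: it re-implements bcd2bin()/bin2bcd() locally and uses an arbitrary example date to walk through that packing, plus the hour/minute truncation opal_set_tpo_time() applies before calling opal_tpo_write().

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's bcd2bin()/bin2bcd() helpers. */
static unsigned int bcd2bin(uint8_t v) { return (v >> 4) * 10 + (v & 0x0f); }
static uint8_t bin2bcd(unsigned int v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

int main(void)
{
	/* Pack 2014-12-14 21:05:30 the way tm_to_opal() does. */
	uint32_t y_m_d = 0;
	uint64_t h_m_s_ms = 0;

	y_m_d |= (uint32_t)bin2bcd(2014 / 100) << 24;	/* century: 0x20 */
	y_m_d |= (uint32_t)bin2bcd(2014 % 100) << 16;	/* year:    0x14 */
	y_m_d |= (uint32_t)bin2bcd(12) << 8;		/* month:   0x12 */
	y_m_d |= (uint32_t)bin2bcd(14);			/* day:     0x14 */

	h_m_s_ms |= (uint64_t)bin2bcd(21) << 56;	/* hour   */
	h_m_s_ms |= (uint64_t)bin2bcd(5)  << 48;	/* minute */
	h_m_s_ms |= (uint64_t)bin2bcd(30) << 40;	/* second */

	printf("y_m_d    = 0x%08x\n", y_m_d);		/* 0x20141214 */
	printf("h_m_s_ms = 0x%016llx\n",
	       (unsigned long long)h_m_s_ms);		/* 0x2105300000000000 */

	/* Unpack again, the way opal_to_tm() does. */
	unsigned int year = bcd2bin(y_m_d >> 24) * 100 + bcd2bin((y_m_d >> 16) & 0xff);
	unsigned int hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
	unsigned int min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
	printf("%u %02u:%02u\n", year, hour, min);	/* 2014 21:05 */

	/* opal_set_tpo_time() keeps only the hour/minute bytes:
	 * (u32)((h_m_s_ms >> 32) & 0xffff0000) -> 0x21050000 here. */
	printf("tpo h_m  = 0x%08x\n", (uint32_t)((h_m_s_ms >> 32) & 0xffff0000));
	return 0;
}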