From 0aed55af88345b5d673240f90e671d79662fb01e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 12:22:50 -0700 Subject: x86, uaccess: introduce copy_from_iter_flushcache for pmem / cache-bypass operations The pmem driver has a need to transfer data with a persistent memory destination and be able to rely on the fact that the destination writes are not cached. It is sufficient for the writes to be flushed to a cpu-store-buffer (non-temporal / "movnt" in x86 terms), as we expect userspace to call fsync() to ensure data-writes have reached a power-fail-safe zone in the platform. The fsync() triggers a REQ_FUA or REQ_FLUSH to the pmem driver which will turn around and fence previous writes with an "sfence". Implement a __copy_from_user_inatomic_flushcache, memcpy_page_flushcache, and memcpy_flushcache, that guarantee that the destination buffer is not dirty in the cpu cache on completion. The new copy_from_iter_flushcache and sub-routines will be used to replace the "pmem api" (include/linux/pmem.h + arch/x86/include/asm/pmem.h). The availability of copy_from_iter_flushcache() and memcpy_flushcache() are gated by the CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE config symbol, and fallback to copy_from_iter_nocache() and plain memcpy() otherwise. This is meant to satisfy the concern from Linus that if a driver wants to do something beyond the normal nocache semantics it should be something private to that driver [1], and Al's concern that anything uaccess related belongs with the rest of the uaccess code [2]. The first consumer of this interface is a new 'copy_from_iter' dax operation so that pmem can inject cache maintenance operations without imposing this overhead on other dax-capable drivers. [1]: https://lists.01.org/pipermail/linux-nvdimm/2017-January/008364.html [2]: https://lists.01.org/pipermail/linux-nvdimm/2017-April/009942.html Cc: Cc: Jan Kara Cc: Jeff Moyer Cc: Ingo Molnar Cc: Christoph Hellwig Cc: Toshi Kani Cc: "H. 
Peter Anvin" Cc: Al Viro Cc: Thomas Gleixner Cc: Matthew Wilcox Reviewed-by: Ross Zwisler Signed-off-by: Dan Williams --- drivers/nvdimm/claim.c | 2 +- drivers/nvdimm/pmem.c | 13 +++++++++++-- drivers/nvdimm/region_devs.c | 4 ++-- 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 7ceb5fa4f2a1..b8b9c8ca7862 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -277,7 +277,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, rc = -EIO; } - memcpy_to_pmem(nsio->addr + offset, buf, size); + memcpy_flushcache(nsio->addr + offset, buf, size); nvdimm_flush(to_nd_region(ndns->dev.parent)); return rc; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index c544d466ea51..2f3aefe565c6 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "pmem.h" @@ -80,7 +81,7 @@ static void write_pmem(void *pmem_addr, struct page *page, { void *mem = kmap_atomic(page); - memcpy_to_pmem(pmem_addr, mem + off, len); + memcpy_flushcache(pmem_addr, mem + off, len); kunmap_atomic(mem); } @@ -235,8 +236,15 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); } +static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i) +{ + return copy_from_iter_flushcache(addr, bytes, i); +} + static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, + .copy_from_iter = pmem_copy_from_iter, }; static void pmem_release_queue(void *q) @@ -294,7 +302,8 @@ static int pmem_attach_disk(struct device *dev, dev_set_drvdata(dev, pmem); pmem->phys_addr = res->start; pmem->size = resource_size(res); - if (nvdimm_has_flush(nd_region) < 0) + if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) + || nvdimm_has_flush(nd_region) < 0) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (!devm_request_mem_region(dev, res->start, resource_size(res), diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index b550edf2571f..985b0e11bd73 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1015,8 +1015,8 @@ void nvdimm_flush(struct nd_region *nd_region) * The first wmb() is needed to 'sfence' all previous writes * such that they are architecturally visible for the platform * buffer flush. Note that we've already arranged for pmem - * writes to avoid the cache via arch_memcpy_to_pmem(). The - * final wmb() ensures ordering for the NVDIMM flush write. + * writes to avoid the cache via memcpy_flushcache(). The final + * wmb() ensures ordering for the NVDIMM flush write. */ wmb(); for (i = 0; i < nd_region->ndr_mappings; i++) -- cgit v1.2.3 From 3c1cebff23cdca01c421411e953a9e239f2b9ef9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 12:58:19 -0700 Subject: dax, pmem: introduce an optional 'flush' dax_operation Filesystem-DAX flushes caches whenever it writes to the address returned through dax_direct_access() and when writing back dirty radix entries. That flushing is only required in the pmem case, so add a dax operation to allow pmem to take this extra action, but skip it for other dax capable devices that do not provide a flush routine. An example for this differentiation might be a volatile ram disk where there is no expectation of persistence. 
In fact the pmem driver itself might front such an address range specified by the NFIT. So, this "no flush" property might be something passed down by the bus / libnvdimm. Cc: Christoph Hellwig Cc: Matthew Wilcox Cc: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 2f3aefe565c6..823b07774244 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -242,9 +242,16 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, return copy_from_iter_flushcache(addr, bytes, i); } +static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, + void *addr, size_t size) +{ + wb_cache_pmem(addr, size); +} + static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, .copy_from_iter = pmem_copy_from_iter, + .flush = pmem_dax_flush, }; static void pmem_release_queue(void *q) -- cgit v1.2.3 From 4e4f00a9b51a1c52ebdd728a1caeb3b9fe48c39d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 22:40:44 -0700 Subject: x86, dax, libnvdimm: remove wb_cache_pmem() indirection With all handling of the CONFIG_ARCH_HAS_PMEM_API case being moved to libnvdimm and the pmem driver directly we do not need to provide global wrappers and fallbacks in the CONFIG_ARCH_HAS_PMEM_API=n case. The pmem driver will simply not link to arch_wb_cache_pmem() in that case. Same as before, pmem flushing is only defined for x86_64, via clean_cache_range(), but it is straightforward to add other archs in the future. arch_wb_cache_pmem() is an exported function since the pmem module needs to find it, but it is privately declared in drivers/nvdimm/pmem.h because there are no consumers outside of the pmem driver. Cc: Cc: Jan Kara Cc: Jeff Moyer Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Oliver O'Halloran Cc: Matthew Wilcox Cc: Ross Zwisler Suggested-by: Christoph Hellwig Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 2 +- drivers/nvdimm/pmem.h | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 823b07774244..3b87702d46bb 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -245,7 +245,7 @@ static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t size) { - wb_cache_pmem(addr, size); + arch_wb_cache_pmem(addr, size); } static const struct dax_operations pmem_dax_ops = { diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 7f4dbd72a90a..c4b3371c7f88 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -5,6 +5,14 @@ #include #include +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size); +#else +static inline void arch_wb_cache_pmem(void *addr, size_t size) +{ +} +#endif + /* this definition is in it's own header for tools/testing/nvdimm to consume */ struct pmem_device { /* One contiguous memory region per device */ -- cgit v1.2.3 From f2b612578e163b49661ece2fe01dfafb0e78f545 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 23:00:34 -0700 Subject: x86, libnvdimm, pmem: move arch_invalidate_pmem() to libnvdimm Kill this globally defined wrapper and move to libnvdimm so that we can ultimately remove include/linux/pmem.h and asm/pmem.h. 
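The optional ->flush() dax operation introduced above is backed by the arch-private arch_wb_cache_pmem() in the pmem driver; a dax device that never registers a flush routine simply gets no cache writeback. A minimal sketch of that calling convention follows. The helper name and the explicit ops argument are illustrative assumptions; the in-kernel plumbing lives in the dax core rather than in drivers/nvdimm.

/*
 * Sketch of how a consumer is expected to treat the optional ->flush()
 * dax operation: call it when present, skip cache writeback otherwise
 * (for example, for a volatile ram disk).
 */
static void example_dax_flush(struct dax_device *dax_dev,
		const struct dax_operations *ops,
		pgoff_t pgoff, void *addr, size_t size)
{
	if (ops->flush)
		ops->flush(dax_dev, pgoff, addr, size);
	/* else: no persistence expectation, nothing to write back */
}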
Cc: Cc: Jeff Moyer Cc: Ingo Molnar Cc: Christoph Hellwig Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Matthew Wilcox Cc: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- drivers/nvdimm/claim.c | 3 ++- drivers/nvdimm/pmem.c | 2 +- drivers/nvdimm/pmem.h | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index b8b9c8ca7862..d2e16c0401df 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -14,6 +14,7 @@ #include #include #include "nd-core.h" +#include "pmem.h" #include "pfn.h" #include "btt.h" #include "nd.h" @@ -272,7 +273,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, cleared /= 512; badblocks_clear(&nsio->bb, sector, cleared); } - invalidate_pmem(nsio->addr + offset, size); + arch_invalidate_pmem(nsio->addr + offset, size); } else rc = -EIO; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 3b87702d46bb..68737bc68a07 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -71,7 +71,7 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset, badblocks_clear(&pmem->bb, sector, cleared); } - invalidate_pmem(pmem->virt_addr + offset, len); + arch_invalidate_pmem(pmem->virt_addr + offset, len); return rc; } diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index c4b3371c7f88..00005900c1b7 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -7,10 +7,14 @@ #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); +void arch_invalidate_pmem(void *addr, size_t size); #else static inline void arch_wb_cache_pmem(void *addr, size_t size) { } +static inline void arch_invalidate_pmem(void *addr, size_t size) +{ +} #endif /* this definition is in it's own header for tools/testing/nvdimm to consume */ -- cgit v1.2.3 From ca6a4657e5420dec727256717e905ebc3c751352 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 13 Jan 2017 20:36:58 -0800 Subject: x86, libnvdimm, pmem: remove global pmem api Now that all callers of the pmem api have been converted to dax helpers that call back to the pmem driver, we can remove include/linux/pmem.h and asm/pmem.h. 
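One practical consequence shows up in the pmem.h hunk below: the memremap flags become private to libnvdimm, with ARCH_MEMREMAP_PMEM resolving to MEMREMAP_WB when the architecture provides a pmem api and MEMREMAP_WT otherwise. A sketch of how a libnvdimm-internal caller consumes that flag; the wrapper function is hypothetical and mirrors the devm_memremap() call the pmem driver makes when mapping a namespace.

/*
 * Sketch: mapping a pmem namespace with the libnvdimm-private
 * ARCH_MEMREMAP_PMEM flag from drivers/nvdimm/pmem.h.  Write-back
 * caching is requested when the architecture has a pmem api,
 * write-through otherwise.  The wrapper itself is illustrative.
 */
#include "pmem.h"	/* ARCH_MEMREMAP_PMEM, struct pmem_device */

static void *example_map_namespace(struct device *dev,
		struct pmem_device *pmem)
{
	return devm_memremap(dev, pmem->phys_addr, pmem->size,
			ARCH_MEMREMAP_PMEM);
}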
Cc: Cc: Jeff Moyer Cc: Ingo Molnar Cc: Christoph Hellwig Cc: Toshi Kani Cc: Oliver O'Halloran Cc: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- drivers/nvdimm/claim.c | 1 - drivers/nvdimm/dimm_devs.c | 8 ++++++++ drivers/nvdimm/namespace_devs.c | 6 +----- drivers/nvdimm/pmem.c | 1 - drivers/nvdimm/pmem.h | 2 ++ drivers/nvdimm/region_devs.c | 1 - 6 files changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index d2e16c0401df..3beedf173902 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -12,7 +12,6 @@ */ #include #include -#include #include "nd-core.h" #include "pmem.h" #include "pfn.h" diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 9852a3355509..6a1e7a3c0c17 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -20,6 +20,7 @@ #include #include "nd-core.h" #include "label.h" +#include "pmem.h" #include "nd.h" static DEFINE_IDA(dimm_ida); @@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr) } EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm); +unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr) +{ + /* pmem mapping properties are private to libnvdimm */ + return ARCH_MEMREMAP_PMEM; +} +EXPORT_SYMBOL_GPL(nd_blk_memremap_flags); + struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping) { struct nvdimm *nvdimm = nd_mapping->nvdimm; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2f9dfbd2dbec..4e9261ef8a95 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -14,10 +14,10 @@ #include #include #include -#include #include #include #include "nd-core.h" +#include "pmem.h" #include "nd.h" static void namespace_io_release(struct device *dev) @@ -155,11 +155,7 @@ bool pmem_should_map_pages(struct device *dev) IORES_DESC_NONE) == REGION_MIXED) return false; -#ifdef ARCH_MEMREMAP_PMEM return ARCH_MEMREMAP_PMEM == MEMREMAP_WB; -#else - return false; -#endif } EXPORT_SYMBOL(pmem_should_map_pages); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 68737bc68a07..06f6c27ec1e9 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 00005900c1b7..fce248a1fc87 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -6,9 +6,11 @@ #include #ifdef CONFIG_ARCH_HAS_PMEM_API +#define ARCH_MEMREMAP_PMEM MEMREMAP_WB void arch_wb_cache_pmem(void *addr, size_t size); void arch_invalidate_pmem(void *addr, size_t size); #else +#define ARCH_MEMREMAP_PMEM MEMREMAP_WT static inline void arch_wb_cache_pmem(void *addr, size_t size) { } diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 985b0e11bd73..3c06a6ea6958 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From c00b396ef782cb2296200d868a0013d8ca1d615e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 23:11:57 -0700 Subject: libnvdimm, pmem: fix persistence warning The pmem driver assumes if platform firmware describes the memory devices associated with a persistent memory range and CONFIG_ARCH_HAS_PMEM_API=y that it has all the mechanism necessary to flush data to a power-fail safe zone. 
We warn if the firmware does not describe memory devices, but we also need to warn if the architecture does not claim pmem support. Cc: Jeff Moyer Cc: Christoph Hellwig Cc: Matthew Wilcox Cc: Ross Zwisler Reviewed-by: Jan Kara Signed-off-by: Dan Williams --- drivers/nvdimm/region_devs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 3c06a6ea6958..41b4cdf5dea8 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1037,8 +1037,9 @@ int nvdimm_has_flush(struct nd_region *nd_region) { int i; - /* no nvdimm == flushing capability unknown */ - if (nd_region->ndr_mappings == 0) + /* no nvdimm or pmem api == flushing capability unknown */ + if (nd_region->ndr_mappings == 0 + || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) return -ENXIO; for (i = 0; i < nd_region->ndr_mappings; i++) { -- cgit v1.2.3 From c9e582aa689f5418ca30e1e7a975039772c3a757 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 29 May 2017 23:12:19 -0700 Subject: libnvdimm, nfit: enable support for volatile ranges Allow volatile nfit ranges to participate in all the same infrastructure provided for persistent memory regions. A resulting namespace device will still be called "pmem", but the parent region type will be "nd_volatile". This is in preparation for disabling the dax ->flush() operation in the pmem driver when it is hosted on a volatile range. Cc: Jan Kara Cc: Jeff Moyer Cc: Christoph Hellwig Cc: Matthew Wilcox Cc: Ross Zwisler Signed-off-by: Dan Williams --- drivers/nvdimm/bus.c | 8 ++++---- drivers/nvdimm/core.c | 2 +- drivers/nvdimm/dax_devs.c | 2 +- drivers/nvdimm/dimm_devs.c | 2 +- drivers/nvdimm/namespace_devs.c | 8 ++++---- drivers/nvdimm/nd-core.h | 9 +++++++++ drivers/nvdimm/pfn_devs.c | 4 ++-- drivers/nvdimm/region_devs.c | 27 ++++++++++++++------------- 8 files changed, 36 insertions(+), 26 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index e9361bffe5ee..4cfba534814b 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev) { if (is_nvdimm(dev)) return ND_DEVICE_DIMM; - else if (is_nd_pmem(dev)) + else if (is_memory(dev)) return ND_DEVICE_REGION_PMEM; else if (is_nd_blk(dev)) return ND_DEVICE_REGION_BLK; else if (is_nd_dax(dev)) return ND_DEVICE_DAX_PMEM; - else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent)) + else if (is_nd_region(dev->parent)) return nd_region_to_nstype(to_nd_region(dev->parent)); return 0; @@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) * Ensure that region devices always have their numa node set as * early as possible. 
*/ - if (is_nd_pmem(dev) || is_nd_blk(dev)) + if (is_nd_region(dev)) set_dev_node(dev, to_nd_region(dev)->numa_node); return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT, to_nd_device_type(dev)); @@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env) static struct module *to_bus_provider(struct device *dev) { /* pin bus providers while regions are enabled */ - if (is_nd_pmem(dev) || is_nd_blk(dev)) { + if (is_nd_region(dev)) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); return nvdimm_bus->nd_desc->module; diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 2dee908e4bae..22e3ef463401 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region, struct nvdimm_bus *nvdimm_bus; struct list_head *poison_list; - if (!is_nd_pmem(&nd_region->dev)) { + if (!is_memory(&nd_region->dev)) { dev_WARN_ONCE(&nd_region->dev, 1, "%s only valid for pmem regions\n", __func__); return; diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index c1b6556aea6e..a304983ac417 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region) struct device *dev = NULL; struct nd_dax *nd_dax; - if (!is_nd_pmem(&nd_region->dev)) + if (!is_memory(&nd_region->dev)) return NULL; nd_dax = nd_dax_alloc(nd_region); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 6a1e7a3c0c17..f0d1b7e5de01 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -419,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data) struct resource *res; int i; - if (!is_nd_pmem(dev)) + if (!is_memory(dev)) return 0; nd_region = to_nd_region(dev); diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 4e9261ef8a95..57724da484d0 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -112,7 +112,7 @@ static int is_uuid_busy(struct device *dev, void *data) static int is_namespace_uuid_busy(struct device *dev, void *data) { - if (is_nd_pmem(dev) || is_nd_blk(dev)) + if (is_nd_region(dev)) return device_for_each_child(dev, data, is_uuid_busy); return 0; } @@ -783,7 +783,7 @@ static int __reserve_free_pmem(struct device *dev, void *data) struct nd_label_id label_id; int i; - if (!is_nd_pmem(dev)) + if (!is_memory(dev)) return 0; nd_region = to_nd_region(dev); @@ -1872,7 +1872,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) struct resource *res; struct device *dev; - if (!is_nd_pmem(&nd_region->dev)) + if (!is_memory(&nd_region->dev)) return NULL; nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); @@ -2152,7 +2152,7 @@ static struct device **scan_labels(struct nd_region *nd_region) } dev->parent = &nd_region->dev; devs[count++] = dev; - } else if (is_nd_pmem(&nd_region->dev)) { + } else if (is_memory(&nd_region->dev)) { /* clean unselected labels */ for (i = 0; i < nd_region->ndr_mappings; i++) { struct list_head *l, *e; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 4c4bd209e725..86bc19ae30da 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -64,7 +64,16 @@ struct blk_alloc_info { bool is_nvdimm(struct device *dev); bool is_nd_pmem(struct device *dev); +bool is_nd_volatile(struct device *dev); bool is_nd_blk(struct device *dev); +static inline bool is_nd_region(struct device *dev) +{ + return is_nd_pmem(dev) || is_nd_blk(dev) || 
is_nd_volatile(dev); +} +static inline bool is_memory(struct device *dev) +{ + return is_nd_pmem(dev) || is_nd_volatile(dev); +} struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev); int __init nvdimm_bus_init(void); void nvdimm_bus_exit(void); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index a6c403600d19..5929eb65cee3 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region) struct nd_pfn *nd_pfn; struct device *dev; - if (!is_nd_pmem(&nd_region->dev)) + if (!is_memory(&nd_region->dev)) return NULL; nd_pfn = nd_pfn_alloc(nd_region); @@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) if (!pfn_sb || !ndns) return -ENODEV; - if (!is_nd_pmem(nd_pfn->dev.parent)) + if (!is_memory(nd_pfn->dev.parent)) return -ENODEV; if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0)) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 41b4cdf5dea8..53a64a16aba4 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -168,6 +168,11 @@ bool is_nd_blk(struct device *dev) return dev ? dev->type == &nd_blk_device_type : false; } +bool is_nd_volatile(struct device *dev) +{ + return dev ? dev->type == &nd_volatile_device_type : false; +} + struct nd_region *to_nd_region(struct device *dev) { struct nd_region *nd_region = container_of(dev, struct nd_region, dev); @@ -214,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); */ int nd_region_to_nstype(struct nd_region *nd_region) { - if (is_nd_pmem(&nd_region->dev)) { + if (is_memory(&nd_region->dev)) { u16 i, alias; for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) { @@ -242,7 +247,7 @@ static ssize_t size_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); unsigned long long size = 0; - if (is_nd_pmem(dev)) { + if (is_memory(dev)) { size = nd_region->ndr_size; } else if (nd_region->ndr_mappings == 1) { struct nd_mapping *nd_mapping = &nd_region->mapping[0]; @@ -307,7 +312,7 @@ static ssize_t set_cookie_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); struct nd_interleave_set *nd_set = nd_region->nd_set; - if (is_nd_pmem(dev) && nd_set) + if (is_memory(dev) && nd_set) /* pass, should be precluded by region_visible */; else return -ENXIO; @@ -334,7 +339,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) if (!ndd) return 0; - if (is_nd_pmem(&nd_region->dev)) { + if (is_memory(&nd_region->dev)) { available += nd_pmem_available_dpa(nd_region, nd_mapping, &overlap); if (overlap > blk_max_overlap) { @@ -520,10 +525,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) struct nd_interleave_set *nd_set = nd_region->nd_set; int type = nd_region_to_nstype(nd_region); - if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr) + if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr) return 0; - if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr) + if (!is_memory(dev) && a == &dev_attr_dax_seed.attr) return 0; if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr) @@ -551,7 +556,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) || type == ND_DEVICE_NAMESPACE_BLK) && a == &dev_attr_available_size.attr) return a->mode; - else if (is_nd_pmem(dev) && nd_set) + else if (is_memory(dev) && nd_set) return a->mode; return 0; @@ -603,7 +608,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, { struct 
nd_region *nd_region; - if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) { + if (!probe && is_nd_region(dev)) { int i; nd_region = to_nd_region(dev); @@ -621,12 +626,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, if (ndd) atomic_dec(&nvdimm->busy); } - - if (is_nd_pmem(dev)) - return; } - if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent)) - && probe) { + if (dev->parent && is_nd_region(dev->parent) && probe) { nd_region = to_nd_region(dev->parent); nvdimm_bus_lock(dev); if (nd_region->ns_seed == dev) -- cgit v1.2.3 From 6e0c90d691cd5d90569f5918ab03eb76c81f9c6e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 26 Jun 2017 21:28:41 -0700 Subject: libnvdimm, pmem, dax: export a cache control attribute The dax_flush() operation can be turned into a nop on platforms where firmware arranges for cpu caches to be flushed on a power-fail event. The ACPI 6.2 specification defines a mechanism for the platform to indicate this capability so the kernel can select the proper default. However, for other platforms, the administrator must toggle this setting manually. Given this flush setting is a dax-specific mechanism we advertise it through a 'dax' attribute group hanging off a host device. For example, a 'pmem0' block-device gets a 'dax' sysfs-subdirectory with a 'write_cache' attribute to control response to dax cache flush requests. This is similar to the 'queue/write_cache' attribute that appears under block devices. Cc: Jan Kara Cc: Jeff Moyer Cc: Matthew Wilcox Cc: Ross Zwisler Suggested-by: Christoph Hellwig Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 06f6c27ec1e9..7339d184070e 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -253,6 +253,11 @@ static const struct dax_operations pmem_dax_ops = { .flush = pmem_dax_flush, }; +static const struct attribute_group *pmem_attribute_groups[] = { + &dax_attribute_group, + NULL, +}; + static void pmem_release_queue(void *q) { blk_cleanup_queue(q); @@ -287,6 +292,7 @@ static int pmem_attach_disk(struct device *dev, struct pmem_device *pmem; struct resource pfn_res; struct request_queue *q; + struct device *gendev; struct gendisk *disk; void *addr; @@ -384,8 +390,12 @@ static int pmem_attach_disk(struct device *dev, put_disk(disk); return -ENOMEM; } + dax_write_cache(dax_dev, true); pmem->dax_dev = dax_dev; + gendev = disk_to_dev(disk); + gendev->groups = pmem_attribute_groups; + device_add_disk(dev, disk); if (devm_add_action_or_reset(dev, pmem_release_disk, pmem)) return -ENOMEM; -- cgit v1.2.3 From 0b277961f4484fb3f142caaa1dd1748cb0b2cbee Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 9 Jun 2017 09:46:50 -0700 Subject: libnvdimm, pmem: disable dax flushing when pmem is fronting a volatile region The pmem driver attaches to both persistent and volatile memory ranges advertised by the ACPI NFIT. When the region is volatile it is redundant to spend cycles flushing caches at fsync(). Check if the hosting region is volatile and do not set dax_write_cache() if it is. 
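The 'dax/write_cache' attribute exported in the previous patch gives the administrator manual control over exactly this behavior: it is a boolean sysfs file under the pmem block device. A small C sketch of toggling it follows; the /sys/block/pmem0/dax/write_cache path is inferred from the description above, and the device name and accepted values ('0'/'1') are assumptions for illustration.

/*
 * Sketch: disable dax cache flushing for pmem0 by writing '0' to the
 * write_cache attribute described above ('1' re-enables it).  The
 * sysfs path is inferred from the commit message and may differ.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/block/pmem0/dax/write_cache";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror(attr);
		return 1;
	}
	if (write(fd, "0", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	return close(fd);
}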
Cc: Jan Kara Cc: Jeff Moyer Cc: Christoph Hellwig Cc: Matthew Wilcox Cc: Ross Zwisler Signed-off-by: Dan Williams --- drivers/nvdimm/pmem.c | 13 ++++++++----- drivers/nvdimm/region_devs.c | 6 ++++++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers/nvdimm') diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 7339d184070e..e7a40f77f729 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -284,10 +284,10 @@ static int pmem_attach_disk(struct device *dev, struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); struct nd_region *nd_region = to_nd_region(dev->parent); struct vmem_altmap __altmap, *altmap = NULL; + int nid = dev_to_node(dev), fua, wbc; struct resource *res = &nsio->res; struct nd_pfn *nd_pfn = NULL; struct dax_device *dax_dev; - int nid = dev_to_node(dev); struct nd_pfn_sb *pfn_sb; struct pmem_device *pmem; struct resource pfn_res; @@ -314,9 +314,12 @@ static int pmem_attach_disk(struct device *dev, dev_set_drvdata(dev, pmem); pmem->phys_addr = res->start; pmem->size = resource_size(res); - if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) - || nvdimm_has_flush(nd_region) < 0) + fua = nvdimm_has_flush(nd_region); + if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) { dev_warn(dev, "unable to guarantee persistence of writes\n"); + fua = 0; + } + wbc = nvdimm_has_cache(nd_region); if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(&ndns->dev))) { @@ -360,7 +363,7 @@ static int pmem_attach_disk(struct device *dev, return PTR_ERR(addr); pmem->virt_addr = addr; - blk_queue_write_cache(q, true, true); + blk_queue_write_cache(q, wbc, fua); blk_queue_make_request(q, pmem_make_request); blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_max_hw_sectors(q, UINT_MAX); @@ -390,7 +393,7 @@ static int pmem_attach_disk(struct device *dev, put_disk(disk); return -ENOMEM; } - dax_write_cache(dax_dev, true); + dax_write_cache(dax_dev, wbc); pmem->dax_dev = dax_dev; gendev = disk_to_dev(disk); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 53a64a16aba4..0c3b089b280a 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1060,6 +1060,12 @@ int nvdimm_has_flush(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(nvdimm_has_flush); +int nvdimm_has_cache(struct nd_region *nd_region) +{ + return is_nd_pmem(&nd_region->dev); +} +EXPORT_SYMBOL_GPL(nvdimm_has_cache); + void __exit nd_region_devs_exit(void) { ida_destroy(®ion_ida); -- cgit v1.2.3
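Seen as a whole, the series makes cache maintenance a property of the dax_operations a driver registers rather than a global pmem api. A driver fronting volatile memory would therefore provide ->direct_access and ->copy_from_iter, omit ->flush, and never call dax_write_cache(); the sketch below illustrates that shape. All names other than the dax_operations members and copy_from_iter_nocache() are hypothetical, and the ->direct_access body is elided.

/*
 * Sketch: dax_operations for a hypothetical dax device backed by
 * volatile memory.  With no ->flush callback registered and
 * dax_write_cache() left disabled, fsync()-driven cache writeback is
 * skipped, mirroring what pmem now does for nd_volatile regions.
 */
static long example_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	/* translate pgoff into a kernel address and pfn for the backing ram */
	return -ENXIO;	/* placeholder for the real translation */
}

static size_t example_dax_copy_from_iter(struct dax_device *dax_dev,
		pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
	/* no persistence target, so a plain nocache copy is sufficient */
	return copy_from_iter_nocache(addr, bytes, i);
}

static const struct dax_operations example_volatile_dax_ops = {
	.direct_access = example_dax_direct_access,
	.copy_from_iter = example_dax_copy_from_iter,
	/* .flush intentionally left unset: nothing to make durable */
};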