summaryrefslogtreecommitdiff
path: root/tools/testing/cxl/test
diff options
context:
space:
mode:
Diffstat (limited to 'tools/testing/cxl/test')
-rw-r--r--tools/testing/cxl/test/Kbuild1
-rw-r--r--tools/testing/cxl/test/cxl.c449
-rw-r--r--tools/testing/cxl/test/hmem_test.c47
-rw-r--r--tools/testing/cxl/test/mem.c6
-rw-r--r--tools/testing/cxl/test/mock.c50
-rw-r--r--tools/testing/cxl/test/mock.h8
6 files changed, 549 insertions, 12 deletions
diff --git a/tools/testing/cxl/test/Kbuild b/tools/testing/cxl/test/Kbuild
index af50972c8b6d..c168e3c998a7 100644
--- a/tools/testing/cxl/test/Kbuild
+++ b/tools/testing/cxl/test/Kbuild
@@ -7,6 +7,7 @@ obj-m += cxl_mock_mem.o
obj-m += cxl_translate.o
cxl_test-y := cxl.o
+cxl_test-y += hmem_test.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index 81e2aef3627a..418669927fb0 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -16,6 +16,7 @@
static int interleave_arithmetic;
static bool extended_linear_cache;
+static bool fail_autoassemble;
#define FAKE_QTG_ID 42
@@ -51,6 +52,31 @@ struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];
+/*
+ * Decoder registry
+ *
+ * Record decoder programming so that the topology can be reconstructed
+ * after cxl_acpi unbind/bind. This allows a user-created region config
+ * to be replayed as if firmware had provided the region at enumeration
+ * time.
+ *
+ * Entries are keyed by a stable port identity (port->uport_dev) combined
+ * with the decoder id. Decoder state is saved at initialization and
+ * updated on commit and reset.
+ *
+ * On re-enumeration mock_init_hdm_decoder() consults this registry to
+ * restore enabled decoders. Disabled decoders are reinitialized to a
+ * clean default state rather than replaying stale programming.
+ */
+static DEFINE_XARRAY(decoder_registry);
+
+/*
+ * When set, decoder reset will not update the registry. This allows
+ * region destroy operations to reset live decoders without erasing
+ * the saved programming needed for replay after re-enumeration.
+ */
+static bool decoder_reset_preserve_registry;
+
static inline bool is_multi_bridge(struct device *dev)
{
int i;
@@ -704,6 +730,194 @@ static int map_targets(struct device *dev, void *data)
return 0;
}
+/*
+ * Build a stable registry key from the decoder's upstream port identity
+ * and decoder id.
+ *
+ * Decoder objects and cxl_port objects are reallocated on each enumeration,
+ * so their addresses cannot be used directly as replay keys. However,
+ * port->uport_dev is stable for a given topology across cxl_acpi unbind/bind
+ * in cxl_test, so use that as the port identity and pack the local decoder
+ * id into the low bits.
+ *
+ * The key is formed as:
+ * ((unsigned long)port->uport_dev << 4) | cxld->id
+ *
+ * The low bits hold the decoder id (which must fit in 4 bits) while
+ * the remaining bits identify the upstream port. This key is only used
+ * within cxl_test to locate saved decoder state during replay.
+ */
+static unsigned long cxld_registry_index(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ dev_WARN_ONCE(&port->dev, cxld->id >= 16,
+ "decoder id:%d out of range\n", cxld->id);
+ return (((unsigned long)port->uport_dev) << 4) | cxld->id;
+}
+
+struct cxl_test_decoder {
+ union {
+ struct cxl_switch_decoder cxlsd;
+ struct cxl_endpoint_decoder cxled;
+ };
+ struct range dpa_range;
+};
+
+static struct cxl_test_decoder *cxld_registry_find(struct cxl_decoder *cxld)
+{
+ return xa_load(&decoder_registry, cxld_registry_index(cxld));
+}
+
+#define dbg_cxld(port, msg, cxld) \
+ do { \
+ struct cxl_decoder *___d = (cxld); \
+ dev_dbg((port)->uport_dev, \
+ "decoder%d: %s range: %#llx-%#llx iw: %d ig: %d flags: %#lx\n", \
+ ___d->id, msg, ___d->hpa_range.start, \
+ ___d->hpa_range.end + 1, ___d->interleave_ways, \
+ ___d->interleave_granularity, ___d->flags); \
+ } while (0)
+
+static int mock_decoder_commit(struct cxl_decoder *cxld);
+static void mock_decoder_reset(struct cxl_decoder *cxld);
+static void init_disabled_mock_decoder(struct cxl_decoder *cxld);
+
+static void cxld_copy(struct cxl_decoder *a, struct cxl_decoder *b)
+{
+ a->id = b->id;
+ a->hpa_range = b->hpa_range;
+ a->interleave_ways = b->interleave_ways;
+ a->interleave_granularity = b->interleave_granularity;
+ a->target_type = b->target_type;
+ a->flags = b->flags;
+ a->commit = mock_decoder_commit;
+ a->reset = mock_decoder_reset;
+}
+
+/*
+ * Restore decoder programming saved in the registry.
+ *
+ * Only decoders that were saved enabled are restored. Disabled decoders
+ * are left in their default inactive state so that stale programming is
+ * not resurrected after topology replay.
+ *
+ * For endpoint decoders this also restores the DPA reservation needed
+ * to reconstruct committed mappings.
+ */
+static int cxld_registry_restore(struct cxl_decoder *cxld,
+ struct cxl_test_decoder *td)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ int rc;
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ if (!(td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE))
+ return 0;
+
+ dbg_cxld(port, "restore", &td->cxlsd.cxld);
+ cxld_copy(cxld, &td->cxlsd.cxld);
+ WARN_ON(cxlsd->nr_targets != td->cxlsd.nr_targets);
+
+ /* Restore saved target intent; live dport binding happens later */
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ cxlsd->target[i] = NULL;
+ cxld->target_map[i] = td->cxlsd.cxld.target_map[i];
+ }
+
+ port->commit_end = cxld->id;
+
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ if (!(td->cxled.cxld.flags & CXL_DECODER_F_ENABLE))
+ return 0;
+
+ dbg_cxld(port, "restore", &td->cxled.cxld);
+ cxld_copy(cxld, &td->cxled.cxld);
+ cxled->state = td->cxled.state;
+ cxled->skip = td->cxled.skip;
+ if (range_len(&td->dpa_range)) {
+ rc = devm_cxl_dpa_reserve(cxled, td->dpa_range.start,
+ range_len(&td->dpa_range),
+ td->cxled.skip);
+ if (rc) {
+ init_disabled_mock_decoder(cxld);
+ return rc;
+ }
+ }
+ port->commit_end = cxld->id;
+ }
+
+ return 0;
+}
+
+static void __cxld_registry_save(struct cxl_test_decoder *td,
+ struct cxl_decoder *cxld)
+{
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ cxld_copy(&td->cxlsd.cxld, cxld);
+ td->cxlsd.nr_targets = cxlsd->nr_targets;
+
+		/* Save target port_id as a stable identity for the dport */
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ struct cxl_dport *dport;
+
+ if (!cxlsd->target[i])
+ continue;
+
+ dport = cxlsd->target[i];
+ td->cxlsd.cxld.target_map[i] = dport->port_id;
+ }
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxld_copy(&td->cxled.cxld, cxld);
+ td->cxled.state = cxled->state;
+ td->cxled.skip = cxled->skip;
+
+ if (!(cxld->flags & CXL_DECODER_F_ENABLE)) {
+ td->dpa_range.start = 0;
+ td->dpa_range.end = -1;
+ } else if (cxled->dpa_res) {
+ td->dpa_range.start = cxled->dpa_res->start;
+ td->dpa_range.end = cxled->dpa_res->end;
+ } else {
+ td->dpa_range.start = 0;
+ td->dpa_range.end = -1;
+ }
+ }
+}
+
+static void cxld_registry_save(struct cxl_test_decoder *td,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ dbg_cxld(port, "save", cxld);
+ __cxld_registry_save(td, cxld);
+}
+
+static void cxld_registry_update(struct cxl_decoder *cxld)
+{
+ struct cxl_test_decoder *td = cxld_registry_find(cxld);
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ if (WARN_ON_ONCE(!td))
+ return;
+
+ dbg_cxld(port, "update", cxld);
+ __cxld_registry_save(td, cxld);
+}
+
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
@@ -723,6 +937,13 @@ static int mock_decoder_commit(struct cxl_decoder *cxld)
port->commit_end++;
cxld->flags |= CXL_DECODER_F_ENABLE;
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_AUTO;
+ }
+ cxld_registry_update(cxld);
return 0;
}
@@ -743,6 +964,65 @@ static void mock_decoder_reset(struct cxl_decoder *cxld)
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
cxld->flags &= ~CXL_DECODER_F_ENABLE;
+
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ cxled->skip = 0;
+ }
+ if (decoder_reset_preserve_registry)
+ dev_dbg(port->uport_dev, "decoder%d: skip registry update\n",
+ cxld->id);
+ else
+ cxld_registry_update(cxld);
+}
+
+static struct cxl_test_decoder *cxld_registry_new(struct cxl_decoder *cxld)
+{
+ struct cxl_test_decoder *td __free(kfree) =
+ kzalloc(sizeof(*td), GFP_KERNEL);
+ unsigned long key = cxld_registry_index(cxld);
+
+ if (!td)
+ return NULL;
+
+ if (xa_insert(&decoder_registry, key, td, GFP_KERNEL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ cxld_registry_save(td, cxld);
+ return no_free_ptr(td);
+}
+
+static void init_disabled_mock_decoder(struct cxl_decoder *cxld)
+{
+ cxld->hpa_range.start = 0;
+ cxld->hpa_range.end = -1;
+ cxld->interleave_ways = 1;
+ cxld->interleave_granularity = 0;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->flags = 0;
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+
+ if (is_switch_decoder(&cxld->dev)) {
+ struct cxl_switch_decoder *cxlsd =
+ to_cxl_switch_decoder(&cxld->dev);
+
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ cxlsd->target[i] = NULL;
+ cxld->target_map[i] = 0;
+ }
+ } else {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ cxled->skip = 0;
+ }
}
static void default_mock_decoder(struct cxl_decoder *cxld)
@@ -757,6 +1037,8 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
+
+ WARN_ON_ONCE(!cxld_registry_new(cxld));
}
static int first_decoder(struct device *dev, const void *data)
@@ -771,13 +1053,29 @@ static int first_decoder(struct device *dev, const void *data)
return 0;
}
-static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
+/*
+ * Initialize a decoder during HDM enumeration.
+ *
+ * If a saved registry entry exists:
+ * - enabled decoders are restored from the saved programming
+ * - disabled decoders are initialized in a clean disabled state
+ *
+ * If no registry entry exists the decoder follows the normal mock
+ * initialization path, including the special auto-region setup for
+ * the first endpoints under host-bridge0.
+ *
+ * Returns true if decoder state was restored from the registry. In
+ * that case the saved decode configuration (including target mapping)
+ * has already been applied and map_targets() is skipped.
+ */
+static bool mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
struct acpi_cedt_cfmws *window = mock_cfmws[0];
struct platform_device *pdev = NULL;
struct cxl_endpoint_decoder *cxled;
struct cxl_switch_decoder *cxlsd;
struct cxl_port *port, *iter;
+ struct cxl_test_decoder *td;
struct cxl_memdev *cxlmd;
struct cxl_dport *dport;
struct device *dev;
@@ -804,6 +1102,24 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
port = NULL;
} while (port);
port = cxled_to_port(cxled);
+ } else {
+ port = to_cxl_port(cxld->dev.parent);
+ }
+
+ td = cxld_registry_find(cxld);
+ if (td) {
+ bool enabled;
+
+ if (is_switch_decoder(&cxld->dev))
+ enabled = td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE;
+ else
+ enabled = td->cxled.cxld.flags & CXL_DECODER_F_ENABLE;
+
+ if (enabled)
+ return !cxld_registry_restore(cxld, td);
+
+ init_disabled_mock_decoder(cxld);
+ return false;
}
/*
@@ -814,9 +1130,16 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
*
* See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
*/
- if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
+ if (!is_endpoint_decoder(&cxld->dev) || !hb0 || pdev->id % 4 ||
+ pdev->id > 4 || cxld->id > 0) {
default_mock_decoder(cxld);
- return;
+ return false;
+ }
+
+ /* Simulate missing cxl_mem.4 configuration */
+ if (hb0 && pdev->id == 4 && cxld->id == 0 && fail_autoassemble) {
+ default_mock_decoder(cxld);
+ return false;
}
base = window->base_hpa;
@@ -838,6 +1161,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
+ WARN_ON_ONCE(!cxld_registry_new(cxld));
/*
* Now that endpoint decoder is set up, walk up the hierarchy
* and setup the switch and root port decoders targeting @cxlmd.
@@ -859,14 +1183,14 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
/* put cxl_mem.4 second in the decode order */
if (pdev->id == 4) {
cxlsd->target[1] = dport;
- cxld->target_map[1] = dport->port_id;
+ cxlsd->cxld.target_map[1] = dport->port_id;
} else {
cxlsd->target[0] = dport;
- cxld->target_map[0] = dport->port_id;
+ cxlsd->cxld.target_map[0] = dport->port_id;
}
} else {
cxlsd->target[0] = dport;
- cxld->target_map[0] = dport->port_id;
+ cxlsd->cxld.target_map[0] = dport->port_id;
}
cxld = &cxlsd->cxld;
cxld->target_type = CXL_DECODER_HOSTONLYMEM;
@@ -885,8 +1209,14 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
.start = base,
.end = base + mock_auto_region_size - 1,
};
+ cxld->commit = mock_decoder_commit;
+ cxld->reset = mock_decoder_reset;
+
+ cxld_registry_update(cxld);
put_device(dev);
}
+
+ return false;
}
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
@@ -895,6 +1225,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_port *port = cxlhdm->port;
struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
int target_count, i;
+ bool restored;
if (is_cxl_endpoint(port))
target_count = 0;
@@ -934,10 +1265,8 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
}
ctx.target_map = cxld->target_map;
-
- mock_init_hdm_decoder(cxld);
-
- if (target_count) {
+ restored = mock_init_hdm_decoder(cxld);
+ if (target_count && !restored) {
rc = device_for_each_child(port->uport_dev, &ctx,
map_targets);
if (rc) {
@@ -1114,6 +1443,53 @@ static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_endpoint_get_perf_coordinates(port, ep_c);
}
+/*
+ * Simulate that the first half of mock CXL Window 0 is "Soft Reserve" capacity
+ */
+static int mock_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ dev_dbg(host, "walk cxl_test resource: %pr\n", &window);
+ return fn(host, 0, &window);
+}
+
+/*
+ * This should only be called by the dax_hmem case, treat mismatches (negative
+ * result) as "fallback to base region_intersects()". Simulate that the first
+ * half of mock CXL Window 0 is IORES_DESC_CXL capacity.
+ */
+static int mock_region_intersects(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc)
+{
+ struct resource res = DEFINE_RES_MEM(start, size);
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ if (resource_overlaps(&res, &window))
+ return REGION_INTERSECTS;
+ pr_debug("warning: no cxl_test CXL intersection for %pr\n", &res);
+ return -1;
+}
+
+
+static int
+mock_region_intersects_soft_reserve(resource_size_t start, size_t size)
+{
+ struct resource res = DEFINE_RES_MEM(start, size);
+ struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
+ struct resource window =
+ DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
+
+ if (resource_overlaps(&res, &window))
+ return REGION_INTERSECTS;
+ pr_debug("warning: no cxl_test soft reserve intersection for %pr\n", &res);
+ return -1;
+}
+
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@@ -1129,6 +1505,9 @@ static struct cxl_mock_ops cxl_mock_ops = {
.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
.hmat_get_extended_linear_cache_size =
mock_hmat_get_extended_linear_cache_size,
+ .walk_hmem_resources = mock_walk_hmem_resources,
+ .region_intersects = mock_region_intersects,
+ .region_intersects_soft_reserve = mock_region_intersects_soft_reserve,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
@@ -1415,6 +1794,33 @@ err_mem:
return rc;
}
+static ssize_t
+decoder_reset_preserve_registry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", decoder_reset_preserve_registry);
+}
+
+static ssize_t
+decoder_reset_preserve_registry_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ rc = kstrtobool(buf, &decoder_reset_preserve_registry);
+ if (rc)
+ return rc;
+ return count;
+}
+
+static DEVICE_ATTR_RW(decoder_reset_preserve_registry);
+
+static struct attribute *cxl_acpi_attrs[] = {
+ &dev_attr_decoder_reset_preserve_registry.attr, NULL
+};
+ATTRIBUTE_GROUPS(cxl_acpi);
+
static __init int cxl_test_init(void)
{
int rc, i;
@@ -1545,6 +1951,7 @@ static __init int cxl_test_init(void)
mock_companion(&acpi0017_mock, &cxl_acpi->dev);
acpi0017_mock.dev.bus = &platform_bus_type;
+ cxl_acpi->dev.groups = cxl_acpi_groups;
rc = platform_device_add(cxl_acpi);
if (rc)
@@ -1554,8 +1961,14 @@ static __init int cxl_test_init(void)
if (rc)
goto err_root;
+ rc = hmem_test_init();
+ if (rc)
+ goto err_mem;
+
return 0;
+err_mem:
+ cxl_mem_exit();
err_root:
platform_device_put(cxl_acpi);
err_rch:
@@ -1589,10 +2002,22 @@ err_gen_pool_create:
return rc;
}
+static void free_decoder_registry(void)
+{
+ unsigned long index;
+ void *entry;
+
+ xa_for_each(&decoder_registry, index, entry) {
+ xa_erase(&decoder_registry, index);
+ kfree(entry);
+ }
+}
+
static __exit void cxl_test_exit(void)
{
int i;
+ hmem_test_exit();
cxl_mem_exit();
platform_device_unregister(cxl_acpi);
cxl_rch_topo_exit();
@@ -1614,12 +2039,16 @@ static __exit void cxl_test_exit(void)
depopulate_all_mock_resources();
gen_pool_destroy(cxl_mock_pool);
unregister_cxl_mock_ops(&cxl_mock_ops);
+ free_decoder_registry();
+ xa_destroy(&decoder_registry);
}
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_param(extended_linear_cache, bool, 0444);
MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
+module_param(fail_autoassemble, bool, 0444);
+MODULE_PARM_DESC(fail_autoassemble, "Simulate missing member of an auto-region");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/test/hmem_test.c b/tools/testing/cxl/test/hmem_test.c
new file mode 100644
index 000000000000..3a1a089e1721
--- /dev/null
+++ b/tools/testing/cxl/test/hmem_test.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2026 Intel Corporation */
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include "../../../drivers/dax/bus.h"
+
+static bool hmem_test;
+
+static void hmem_test_work(struct work_struct *work)
+{
+}
+
+static void hmem_test_release(struct device *dev)
+{
+ struct hmem_platform_device *hpdev =
+ container_of(dev, typeof(*hpdev), pdev.dev);
+
+ memset(hpdev, 0, sizeof(*hpdev));
+}
+
+static struct hmem_platform_device hmem_test_device = {
+ .pdev = {
+ .name = "hmem_platform",
+ .id = 1,
+ .dev = {
+ .release = hmem_test_release,
+ },
+ },
+ .work = __WORK_INITIALIZER(hmem_test_device.work, hmem_test_work),
+};
+
+int hmem_test_init(void)
+{
+ if (!hmem_test)
+ return 0;
+
+ return platform_device_register(&hmem_test_device.pdev);
+}
+
+void hmem_test_exit(void)
+{
+ if (hmem_test)
+ platform_device_unregister(&hmem_test_device.pdev);
+}
+
+module_param(hmem_test, bool, 0444);
+MODULE_PARM_DESC(hmem_test, "Enable/disable the dax_hmem test platform device");
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index cb87e8c0e63c..271c7ad8cc32 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -1695,6 +1695,9 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_dpa_info range_info = { 0 };
int rc;
+ /* Increase async probe race window */
+ usleep_range(500*1000, 1000*1000);
+
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
if (!mdata)
return -ENOMEM;
@@ -1716,7 +1719,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- mds = cxl_memdev_state_create(dev);
+ mds = cxl_memdev_state_create(dev, pdev->id + 1, 0);
if (IS_ERR(mds))
return PTR_ERR(mds);
@@ -1732,7 +1735,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds->serial = pdev->id + 1;
if (is_rcd(pdev))
cxlds->rcd = true;
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index b8fcb50c1027..6454b868b122 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -251,6 +251,56 @@ struct cxl_dport *__wrap_devm_cxl_add_dport_by_dev(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_dport_by_dev, "CXL");
+int __wrap_region_intersects(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc)
+{
+ int rc = -1;
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->region_intersects(start, size, flags, desc);
+ if (rc < 0)
+ rc = region_intersects(start, size, flags, desc);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_region_intersects);
+
+int __wrap_region_intersects_soft_reserve(resource_size_t start, size_t size)
+{
+ int rc = -1;
+ int index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops)
+ rc = ops->region_intersects_soft_reserve(start, size);
+ if (rc < 0)
+ rc = region_intersects_soft_reserve(start, size);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_region_intersects_soft_reserve);
+
+int __wrap_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ int index, rc = 0;
+ bool is_mock = strcmp(dev_name(host), "hmem_platform.1") == 0;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (is_mock) {
+ if (ops)
+ rc = ops->walk_hmem_resources(host, fn);
+ } else {
+ rc = walk_hmem_resources(host, fn);
+ }
+ put_cxl_mock_ops(index);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__wrap_walk_hmem_resources);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: emulation module");
MODULE_IMPORT_NS("ACPI");
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index 2684b89c8aa2..4f57dc80ae7d 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -2,6 +2,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
+#include <linux/dax.h>
#include <cxl.h>
struct cxl_mock_ops {
@@ -27,8 +28,15 @@ struct cxl_mock_ops {
int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
int nid,
resource_size_t *cache_size);
+ int (*walk_hmem_resources)(struct device *host, walk_hmem_fn fn);
+ int (*region_intersects)(resource_size_t start, size_t size,
+ unsigned long flags, unsigned long desc);
+ int (*region_intersects_soft_reserve)(resource_size_t start,
+ size_t size);
};
+int hmem_test_init(void);
+void hmem_test_exit(void);
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
struct cxl_mock_ops *get_cxl_mock_ops(int *index);