Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/tests/.kunitconfig           |    1
-rw-r--r--  drivers/gpu/drm/ttm/tests/Makefile               |    2
-rw-r--r--  drivers/gpu/drm/ttm/tests/TODO                   |   27
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_test.c          |   69
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c | 1225
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_device_test.c      |    3
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c    |  176
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h    |   17
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.c     |  234
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_mock_manager.h     |   30
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_pool_test.c        |    9
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_resource_test.c    |   23
-rw-r--r--  drivers/gpu/drm/ttm/tests/ttm_tt_test.c          |  169
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                     |  461
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c                |  151
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c                 |   29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                   |    2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c               |  251
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                     |    5
19 files changed, 2467 insertions(+), 417 deletions(-)
diff --git a/drivers/gpu/drm/ttm/tests/.kunitconfig b/drivers/gpu/drm/ttm/tests/.kunitconfig
index 75fdce0cd98e..1ae1ffabd51e 100644
--- a/drivers/gpu/drm/ttm/tests/.kunitconfig
+++ b/drivers/gpu/drm/ttm/tests/.kunitconfig
@@ -1,4 +1,3 @@
CONFIG_KUNIT=y
CONFIG_DRM=y
-CONFIG_DRM_KUNIT_TEST_HELPERS=y
CONFIG_DRM_TTM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/ttm/tests/Makefile b/drivers/gpu/drm/ttm/tests/Makefile
index 468535f7eed2..f3149de77541 100644
--- a/drivers/gpu/drm/ttm/tests/Makefile
+++ b/drivers/gpu/drm/ttm/tests/Makefile
@@ -6,4 +6,6 @@ obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
ttm_resource_test.o \
ttm_tt_test.o \
ttm_bo_test.o \
+ ttm_bo_validate_test.o \
+ ttm_mock_manager.o \
ttm_kunit_helpers.o
diff --git a/drivers/gpu/drm/ttm/tests/TODO b/drivers/gpu/drm/ttm/tests/TODO
new file mode 100644
index 000000000000..45b03d184ccf
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/TODO
@@ -0,0 +1,27 @@
+TODO
+=====
+
+- Add a test case where the only evictable BO is busy
+- Update eviction tests so they use parametrized "from" memory type
+- Improve mock manager's implementation, e.g. allocate a block of
+ dummy memory that can be used when testing page mapping functions
+- Suggestion: Add test cases with external BOs
+- Suggestion: randomize the number and size of tested buffers in
+ ttm_bo_validate()
+- Agree on the naming convention
+- Rewrite the mock manager: drop use_tt and manage mock memory using
+ drm_mm manager
+
+Notes and gotchas
+=================
+
+- These tests are built and run with a UML kernel (see the sample run
+  below), because
+ 1) We are interested in hardware-independent testing
+ 2) We don't want to have actual DRM devices interacting with TTM
+ at the same time as the test one. Getting these to work in
+ parallel would require some time (...and that's a "todo" in itself!)
+- Triggering ttm_bo_vm_ops callbacks from KUnit (i.e. kernel) might be
+ a challenge, but is worth trying. Look at selftests like
+ i915/gem/selftests/i915_gem_mman.c for inspiration
+- The test suite runs under UML, where the ioremap() call returns NULL,
+  meaning that ttm_bo_ioremap() can't be tested unless we find a way to stub it
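For reference, a minimal way to build and run this suite, assuming the
standard in-tree kunit.py wrapper (which builds and boots a UML kernel by
default) and the .kunitconfig shipped with this patch:

	./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests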
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 1f8a4f8adc92..f0a7eb62116c 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -18,6 +18,12 @@
#define BO_SIZE SZ_8K
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
+#else
+#define ww_mutex_base_lock(b) mutex_lock(b)
+#endif
+
struct ttm_bo_test_case {
const char *description;
bool interruptible;
@@ -56,7 +62,7 @@ static void ttm_bo_reserve_optimistic_no_ticket(struct kunit *test)
struct ttm_buffer_object *bo;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_bo_reserve(bo, params->interruptible, params->no_wait, NULL);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -71,7 +77,7 @@ static void ttm_bo_reserve_locked_no_sleep(struct kunit *test)
bool no_wait = true;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
/* Let's lock it beforehand */
dma_resv_lock(bo->base.resv, NULL);
@@ -92,7 +98,7 @@ static void ttm_bo_reserve_no_wait_ticket(struct kunit *test)
ww_acquire_init(&ctx, &reservation_ww_class);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
KUNIT_ASSERT_EQ(test, err, -EBUSY);
@@ -110,7 +116,7 @@ static void ttm_bo_reserve_double_resv(struct kunit *test)
ww_acquire_init(&ctx, &reservation_ww_class);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_bo_reserve(bo, interruptible, no_wait, &ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -138,11 +144,11 @@ static void ttm_bo_reserve_deadlock(struct kunit *test)
bool no_wait = false;
int err;
- bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
- bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
ww_acquire_init(&ctx1, &reservation_ww_class);
- mutex_lock(&bo2->base.resv->lock.base);
+ ww_mutex_base_lock(&bo2->base.resv->lock.base);
/* The deadlock will be caught by WW mutex, don't warn about it */
lock_release(&bo2->base.resv->lock.base.dep_map, 1);
@@ -208,7 +214,7 @@ static void ttm_bo_reserve_interrupted(struct kunit *test)
struct task_struct *task;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
task = kthread_create(threaded_ttm_bo_reserve, bo, "ttm-bo-reserve");
@@ -237,7 +243,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
struct ttm_place *place;
struct ttm_resource_manager *man;
unsigned int bo_prio = TTM_MAX_BO_PRIORITY - 1;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
int err;
place = ttm_place_kunit_init(test, mem_type, 0);
@@ -249,7 +255,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->priority = bo_prio;
err = ttm_resource_alloc(bo, place, &res1);
@@ -265,7 +271,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
man = ttm_manager_type(priv->ttm_dev, mem_type);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &man->lru[bo->priority]), 1);
+ list_is_last(&res1->lru.link, &man->lru[bo->priority]), 1);
ttm_resource_free(bo, &res2);
ttm_resource_free(bo, &res1);
@@ -278,7 +284,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
struct ttm_device *ttm_dev;
struct ttm_resource *res1, *res2;
struct ttm_place *place;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
@@ -288,7 +294,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
place = ttm_place_kunit_init(test, mem_type, 0);
dma_resv_lock(bo->base.resv, NULL);
@@ -302,11 +308,11 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
err = ttm_resource_alloc(bo, place, &res2);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res2->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res2->lru.link, &priv->ttm_dev->pinned), 1);
ttm_bo_unreserve(bo);
KUNIT_ASSERT_EQ(test,
- list_is_last(&res1->lru, &priv->ttm_dev->pinned), 1);
+ list_is_last(&res1->lru.link, &priv->ttm_dev->pinned), 1);
ttm_resource_free(bo, &res1);
ttm_resource_free(bo, &res2);
@@ -321,7 +327,8 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
struct ttm_resource *res1, *res2;
struct ttm_device *ttm_dev;
struct ttm_place *place;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ struct dma_resv *resv;
+ u32 mem_type = TTM_PL_SYSTEM;
unsigned int bo_priority = 0;
int err;
@@ -332,12 +339,17 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
+ resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, resv);
+
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
- bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ dma_resv_init(resv);
+
+ bo1 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
+ bo2 = ttm_bo_kunit_init(test, test->priv, BO_SIZE, resv);
dma_resv_lock(bo1->base.resv, NULL);
ttm_bo_set_bulk_move(bo1, &lru_bulk_move);
@@ -363,6 +375,8 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
ttm_resource_free(bo1, &res1);
ttm_resource_free(bo2, &res2);
+
+ dma_resv_fini(resv);
}
static void ttm_bo_put_basic(struct kunit *test)
@@ -372,7 +386,7 @@ static void ttm_bo_put_basic(struct kunit *test)
struct ttm_resource *res;
struct ttm_device *ttm_dev;
struct ttm_place *place;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
int err;
place = ttm_place_kunit_init(test, mem_type, 0);
@@ -384,7 +398,7 @@ static void ttm_bo_put_basic(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
err = ttm_resource_alloc(bo, place, &res);
@@ -445,7 +459,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test)
dma_fence_signal(fence);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
bo->base.resv = external_resv;
@@ -467,7 +481,7 @@ static void ttm_bo_pin_basic(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
for (int i = 0; i < no_pins; i++) {
dma_resv_lock(bo->base.resv, NULL);
@@ -487,7 +501,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
struct ttm_resource *res;
struct ttm_device *ttm_dev;
struct ttm_place *place;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
unsigned int bo_priority = 0;
int err;
@@ -502,7 +516,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_resource_alloc(bo, place, &res);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -538,7 +552,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
struct ttm_resource *res;
struct ttm_device *ttm_dev;
struct ttm_place *place;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
unsigned int bo_priority = 0;
int err;
@@ -553,7 +567,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_resource_alloc(bo, place, &res);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -619,4 +633,5 @@ static struct kunit_suite ttm_bo_test_suite = {
kunit_test_suites(&ttm_bo_test_suite);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
new file mode 100644
index 000000000000..1adf18481ea0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_kunit_helpers.h"
+#include "ttm_mock_manager.h"
+
+#define BO_SIZE SZ_4K
+#define MANAGER_SIZE SZ_1M
+
+static struct spinlock fence_lock;
+
+struct ttm_bo_validate_test_case {
+ const char *description;
+ enum ttm_bo_type bo_type;
+ u32 mem_type;
+ bool with_ttm;
+ bool no_gpu_wait;
+};
+
+static struct ttm_placement *ttm_placement_kunit_init(struct kunit *test,
+ struct ttm_place *places,
+ unsigned int num_places)
+{
+ struct ttm_placement *placement;
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ placement->num_placement = num_places;
+ placement->placement = places;
+
+ return placement;
+}
+
+static const char *fence_name(struct dma_fence *f)
+{
+ return "ttm-bo-validate-fence";
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = fence_name,
+ .get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_mock_fence(struct kunit *test)
+{
+ struct dma_fence *fence;
+
+ fence = kunit_kzalloc(test, sizeof(*fence), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, fence);
+
+ dma_fence_init(fence, &fence_ops, &fence_lock, 0, 0);
+
+ return fence;
+}
+
+static void dma_resv_kunit_active_fence_init(struct kunit *test,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ struct dma_fence *fence;
+
+ fence = alloc_mock_fence(test);
+ dma_fence_enable_sw_signaling(fence);
+
+ dma_resv_lock(resv, NULL);
+ dma_resv_reserve_fences(resv, 1);
+ dma_resv_add_fence(resv, fence, usage);
+ dma_resv_unlock(resv);
+}
+
+static void ttm_bo_validate_case_desc(const struct ttm_bo_validate_test_case *t,
+ char *desc)
+{
+ strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_type_cases[] = {
+ {
+ .description = "Buffer object for userspace",
+ .bo_type = ttm_bo_type_device,
+ },
+ {
+ .description = "Kernel buffer object",
+ .bo_type = ttm_bo_type_kernel,
+ },
+ {
+ .description = "Shared buffer object",
+ .bo_type = ttm_bo_type_sg,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_types, ttm_bo_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_init_reserved_sys_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_test_devices *priv = test->priv;
+ enum ttm_bo_type bo_type = params->bo_type;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, bo->page_alignment, PAGE_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, bo->destroy, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, bo->pin_count, 0);
+ KUNIT_EXPECT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_FALSE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_init_reserved_mock_man(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = params->bo_type;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, kref_read(&bo->kref), 1);
+ KUNIT_EXPECT_PTR_EQ(test, bo->bdev, priv->ttm_dev);
+ KUNIT_EXPECT_EQ(test, bo->type, bo_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ if (bo_type != ttm_bo_type_kernel)
+ KUNIT_EXPECT_TRUE(test,
+ drm_mm_node_allocated(&bo->base.vma_node.vm_node));
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_put(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_init_reserved_resv(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct dma_resv resv;
+ int err;
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+ dma_resv_init(&resv);
+ dma_resv_lock(&resv, NULL);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, &resv,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
+
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_validate_basic(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ u32 fst_mem = TTM_PL_SYSTEM, snd_mem = TTM_PL_VRAM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *fst_placement, *snd_placement;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *snd_place;
+ u32 size = ALIGN(SZ_8K, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ fst_placement = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ fst_placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION);
+ snd_placement = ttm_placement_kunit_init(test, snd_place, 1);
+
+ err = ttm_bo_validate(bo, snd_placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm));
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_EXPECT_EQ(test, bo->resource->placement,
+ DRM_BUDDY_TOPDOWN_ALLOCATION);
+
+ ttm_bo_put(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_invalid_placement(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 unknown_mem_type = TTM_PL_PRIV + 1;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, unknown_mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_validate_failed_alloc(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bad_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_put(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_pinned(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_pin(bo);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, -EINVAL);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ ttm_bo_unpin(bo);
+ dma_resv_unlock(bo->base.resv);
+
+ ttm_bo_put(bo);
+}
+
+static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
+ {
+ .description = "System manager",
+ .mem_type = TTM_PL_SYSTEM,
+ },
+ {
+ .description = "VRAM manager",
+ .mem_type = TTM_PL_VRAM,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_mem, ttm_mem_type_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_same_placement(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, params->mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_init(priv->ttm_dev, params->mem_type, MANAGER_SIZE);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
+
+ ttm_bo_put(bo);
+
+ if (params->mem_type != TTM_PL_SYSTEM)
+ ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
+}
+
+static void ttm_bo_validate_busy_placement(struct kunit *test)
+{
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_bad_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ man = ttm_manager_type(priv->ttm_dev, snd_mem);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, bo->base.size);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
+
+ ttm_bo_put(bo);
+ ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_multihop(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_init, *placement_val;
+ u32 fst_mem = TTM_PL_VRAM, tmp_mem = TTM_PL_TT, final_mem = TTM_PL_SYSTEM;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *fst_place, *final_place;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_buffer_object *bo;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, tmp_mem, MANAGER_SIZE);
+
+ fst_place = ttm_place_kunit_init(test, fst_mem, 0);
+ placement_init = ttm_placement_kunit_init(test, fst_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, params->bo_type,
+ placement_init, PAGE_SIZE, &ctx_init, NULL,
+ NULL, &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ final_place = ttm_place_kunit_init(test, final_mem, 0);
+ placement_val = ttm_placement_kunit_init(test, final_place, 1);
+
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
+
+ ttm_bo_put(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_no_placement_cases[] = {
+ {
+ .description = "Buffer object in system domain, no page vector",
+ },
+ {
+ .description = "Buffer object in system domain with an existing page vector",
+ .with_ttm = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_no_placement, ttm_bo_no_placement_cases,
+ ttm_bo_validate_case_desc);
+
+static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ struct ttm_tt *old_tt;
+ u32 flags;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ if (params->with_ttm) {
+ old_tt = priv->ttm_dev->funcs->ttm_tt_create(bo, 0);
+ ttm_pool_alloc(&priv->ttm_dev->pool, old_tt, &ctx);
+ bo->ttm = old_tt;
+ }
+
+ err = ttm_resource_alloc(bo, place, &bo->resource);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, size);
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_EQ(test, man->usage, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (params->with_ttm) {
+ flags = bo->ttm->page_flags;
+
+ KUNIT_ASSERT_PTR_EQ(test, bo->ttm, old_tt);
+ KUNIT_ASSERT_FALSE(test, flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
+ }
+
+ ttm_bo_put(bo);
+}
+
+static int threaded_dma_resv_signal(void *arg)
+{
+ struct ttm_buffer_object *bo = arg;
+ struct dma_resv *resv = bo->base.resv;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_signal(fence);
+ }
+ dma_resv_iter_end(&cursor);
+
+ return 0;
+}
+
+static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ struct ttm_place *place;
+ int err;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = params->bo_type;
+
+ err = ttm_resource_alloc(bo, place, &bo->resource);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, placement);
+
+ /* Create an active fence to simulate a non-idle resv object */
+ spin_lock_init(&fence_lock);
+ dma_resv_kunit_active_fence_init(test, bo->base.resv, usage);
+
+ task = kthread_create(threaded_dma_resv_signal, bo, "dma-resv-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create dma resv signal task\n");
+
+ wake_up_process(task);
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_ASSERT_NOT_NULL(test, bo->ttm);
+ KUNIT_ASSERT_NULL(test, bo->resource);
+ KUNIT_ASSERT_NULL(test, bo->bulk_move);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, 0);
+
+ if (bo->type != ttm_bo_type_sg)
+ KUNIT_ASSERT_PTR_EQ(test, bo->base.resv, &bo->base._resv);
+
+ /* Make sure we have an idle object at this point */
+ dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
+
+ ttm_bo_put(bo);
+}
+
+static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
+{
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_operation_ctx ctx = { };
+ u32 mem_type = TTM_PL_SYSTEM;
+ struct ttm_resource_manager *man;
+ struct ttm_placement *placement;
+ struct ttm_buffer_object *bo;
+ struct ttm_place *place;
+ int err;
+
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+ man->move = dma_fence_get_stub();
+
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
+ bo->type = bo_type;
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
+
+ ttm_bo_put(bo);
+ dma_fence_put(man->move);
+}
+
+static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
+ {
+ .description = "Waits for GPU",
+ .no_gpu_wait = false,
+ },
+ {
+ .description = "Tries to lock straight away",
+ .no_gpu_wait = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM(ttm_bo_validate_wait, ttm_bo_validate_wait_cases,
+ ttm_bo_validate_case_desc);
+
+static int threaded_fence_signal(void *arg)
+{
+ struct dma_fence *fence = arg;
+
+ msleep(20);
+
+ return dma_fence_signal(fence);
+}
+
+static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
+{
+ const struct ttm_bo_validate_test_case *params = test->param_value;
+ struct ttm_operation_ctx ctx_init = { },
+ ctx_val = { .no_wait_gpu = params->no_gpu_wait };
+ u32 fst_mem = TTM_PL_VRAM, snd_mem = TTM_PL_VRAM + 1;
+ struct ttm_placement *placement_init, *placement_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ u32 size = ALIGN(BO_SIZE, PAGE_SIZE);
+ struct ttm_place *init_place, places[2];
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ struct task_struct *task;
+ int err;
+
+ init_place = ttm_place_kunit_init(test, TTM_PL_SYSTEM, 0);
+ placement_init = ttm_placement_kunit_init(test, init_place, 1);
+
+ bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo);
+
+ drm_gem_private_object_init(priv->drm, &bo->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo, bo_type, placement_init,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, fst_mem, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, snd_mem, MANAGER_SIZE);
+
+ places[0] = (struct ttm_place){ .mem_type = fst_mem, .flags = TTM_PL_FLAG_DESIRED };
+ places[1] = (struct ttm_place){ .mem_type = snd_mem, .flags = TTM_PL_FLAG_FALLBACK };
+ placement_val = ttm_placement_kunit_init(test, places, 2);
+
+ spin_lock_init(&fence_lock);
+ man = ttm_manager_type(priv->ttm_dev, fst_mem);
+ man->move = alloc_mock_fence(test);
+
+ task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal");
+ if (IS_ERR(task))
+ KUNIT_FAIL(test, "Couldn't create move fence signal task\n");
+
+ wake_up_process(task);
+ err = ttm_bo_validate(bo, placement_val, &ctx_val);
+ dma_resv_unlock(bo->base.resv);
+
+ dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);
+
+ if (params->no_gpu_wait)
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
+ else
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
+
+ ttm_bo_put(bo);
+ ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
+ ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+}
+
+static void ttm_bo_validate_swapout(struct kunit *test)
+{
+ unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE);
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_small, *bo_big;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_TT;
+ struct ttm_place *place;
+ struct sysinfo si;
+ int err;
+
+ si_meminfo(&si);
+ size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE);
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_small);
+
+ drm_gem_private_object_init(priv->drm, &bo_small->base, size);
+
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement,
+ PAGE_SIZE, &ctx, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_small->base.resv);
+
+ bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL);
+
+ dma_resv_lock(bo_big->base.resv, NULL);
+ err = ttm_bo_validate(bo_big, placement, &ctx);
+ dma_resv_unlock(bo_big->base.resv);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_NOT_NULL(test, bo_big->resource);
+ KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM);
+ KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED);
+
+ ttm_bo_put(bo_big);
+ ttm_bo_put(bo_small);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_happy_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ u32 small = SZ_8K, medium = SZ_512K,
+ big = MANAGER_SIZE - (small + medium);
+ u32 bo_sizes[] = { small, medium, big };
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bos, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 bo_no = 3;
+ int i, err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bos = kunit_kmalloc_array(test, bo_no, sizeof(*bos), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bos);
+
+ memset(bos, 0, sizeof(*bos) * bo_no);
+ for (i = 0; i < bo_no; i++) {
+ drm_gem_private_object_init(priv->drm, &bos[i].base, bo_sizes[i]);
+ err = ttm_bo_init_reserved(priv->ttm_dev, &bos[i], bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ dma_resv_unlock(bos[i].base.resv);
+ }
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bos[0].resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+ KUNIT_EXPECT_TRUE(test, bos[0].ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, small * 2 + BO_SIZE);
+ KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
+
+ for (i = 0; i < bo_no; i++)
+ ttm_bo_put(&bos[i]);
+ ttm_bo_put(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_bo_pin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+
+ ttm_bo_put(bo_small);
+
+ ttm_bo_reserve(bo_big, false, false, NULL);
+ ttm_bo_unpin(bo_big);
+ dma_resv_unlock(bo_big->base.resv);
+ ttm_bo_put(bo_big);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT,
+ mem_type_evict = TTM_PL_SYSTEM;
+ struct ttm_buffer_object *bo, *bo_evictable, *bo_pinned;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ u32 size = SZ_512K;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_multihop, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_pinned = kunit_kzalloc(test, sizeof(*bo_pinned), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_pinned);
+
+ drm_gem_private_object_init(priv->drm, &bo_pinned->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_pinned, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ ttm_bo_pin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+
+ bo_evictable = kunit_kzalloc(test, sizeof(*bo_evictable), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evictable);
+
+ drm_gem_private_object_init(priv->drm, &bo_evictable->base, size);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evictable, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evictable->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_pinned->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
+ KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
+
+ ttm_bo_put(bo);
+ ttm_bo_put(bo_evictable);
+
+ ttm_bo_reserve(bo_pinned, false, false, NULL);
+ ttm_bo_unpin(bo_pinned);
+ dma_resv_unlock(bo_pinned->base.resv);
+ ttm_bo_put(bo_pinned);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
+}
+
+static void ttm_bo_validate_deleted_evict(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ u32 small = SZ_8K, big = MANAGER_SIZE - BO_SIZE;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_buffer_object *bo_big, *bo_small;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_resource_manager *man;
+ u32 mem_type = TTM_PL_VRAM;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ man = ttm_manager_type(priv->ttm_dev, mem_type);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_big = kunit_kzalloc(test, sizeof(*bo_big), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_big);
+
+ drm_gem_private_object_init(priv->drm, &bo_big->base, big);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_big, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), big);
+
+ dma_resv_unlock(bo_big->base.resv);
+ bo_big->deleted = true;
+
+ bo_small = ttm_bo_kunit_init(test, test->priv, small, NULL);
+ bo_small->type = bo_type;
+
+ ttm_bo_reserve(bo_small, false, false, NULL);
+ err = ttm_bo_validate(bo_small, placement, &ctx_val);
+ ttm_bo_unreserve(bo_small);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, mem_type);
+ KUNIT_EXPECT_EQ(test, ttm_resource_manager_usage(man), small);
+ KUNIT_EXPECT_NULL(test, bo_big->ttm);
+ KUNIT_EXPECT_NULL(test, bo_big->resource);
+
+ ttm_bo_put(bo_small);
+ ttm_bo_put(bo_big);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_VRAM, mem_type_evict = TTM_PL_MOCK1;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo_init, *bo_val;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ /*
+ * Drop the default device and set up a new one that points to a busy,
+ * and thus unsuitable, eviction domain
+ */
+ ttm_device_fini(priv->ttm_dev);
+
+ err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_busy_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_init = kunit_kzalloc(test, sizeof(*bo_init), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_init);
+
+ drm_gem_private_object_init(priv->drm, &bo_init->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_init, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_init->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, -ENOMEM);
+ KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
+ KUNIT_EXPECT_NULL(test, bo_val->resource);
+
+ ttm_bo_put(bo_init);
+ ttm_bo_put(bo_val);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
+}
+
+static void ttm_bo_validate_evict_gutting(struct kunit *test)
+{
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_buffer_object *bo, *bo_evict;
+ u32 mem_type = TTM_PL_MOCK1;
+ struct ttm_placement *placement;
+ struct ttm_place *place;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+
+ place = ttm_place_kunit_init(test, mem_type, 0);
+ placement = ttm_placement_kunit_init(test, place, 1);
+
+ bo_evict = kunit_kzalloc(test, sizeof(*bo_evict), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_evict);
+
+ drm_gem_private_object_init(priv->drm, &bo_evict->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_evict, bo_type, placement,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_evict->base.resv);
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo->type = bo_type;
+
+ ttm_bo_reserve(bo, false, false, NULL);
+ err = ttm_bo_validate(bo, placement, &ctx_val);
+ ttm_bo_unreserve(bo);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+ KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
+ KUNIT_ASSERT_NULL(test, bo_evict->resource);
+ KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
+
+ ttm_bo_put(bo_evict);
+ ttm_bo_put(bo);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+}
+
+static void ttm_bo_validate_recursive_evict(struct kunit *test)
+{
+ u32 mem_type = TTM_PL_TT, mem_type_evict = TTM_PL_MOCK2;
+ struct ttm_operation_ctx ctx_init = { }, ctx_val = { };
+ struct ttm_placement *placement_tt, *placement_mock;
+ struct ttm_buffer_object *bo_tt, *bo_mock, *bo_val;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct ttm_test_devices *priv = test->priv;
+ struct ttm_place *place_tt, *place_mock;
+ int err;
+
+ ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
+ ttm_mock_manager_init(priv->ttm_dev, mem_type_evict, MANAGER_SIZE);
+
+ place_tt = ttm_place_kunit_init(test, mem_type, 0);
+ place_mock = ttm_place_kunit_init(test, mem_type_evict, 0);
+
+ placement_tt = ttm_placement_kunit_init(test, place_tt, 1);
+ placement_mock = ttm_placement_kunit_init(test, place_mock, 1);
+
+ bo_tt = kunit_kzalloc(test, sizeof(*bo_tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_tt);
+
+ bo_mock = kunit_kzalloc(test, sizeof(*bo_mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, bo_mock);
+
+ drm_gem_private_object_init(priv->drm, &bo_tt->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_tt, bo_type, placement_tt,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_tt->base.resv);
+
+ drm_gem_private_object_init(priv->drm, &bo_mock->base, MANAGER_SIZE);
+ err = ttm_bo_init_reserved(priv->ttm_dev, bo_mock, bo_type, placement_mock,
+ PAGE_SIZE, &ctx_init, NULL, NULL,
+ &dummy_ttm_bo_destroy);
+ KUNIT_EXPECT_EQ(test, err, 0);
+ dma_resv_unlock(bo_mock->base.resv);
+
+ bo_val = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+ bo_val->type = bo_type;
+
+ ttm_bo_reserve(bo_val, false, false, NULL);
+ err = ttm_bo_validate(bo_val, placement_tt, &ctx_val);
+ ttm_bo_unreserve(bo_val);
+
+ KUNIT_EXPECT_EQ(test, err, 0);
+
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type);
+ ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
+
+ ttm_bo_put(bo_val);
+ ttm_bo_put(bo_tt);
+ ttm_bo_put(bo_mock);
+}
+
+static struct kunit_case ttm_bo_validate_test_cases[] = {
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_sys_man, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_init_reserved_mock_man, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_init_reserved_resv),
+ KUNIT_CASE_PARAM(ttm_bo_validate_basic, ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_invalid_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_same_placement,
+ ttm_bo_validate_mem_gen_params),
+ KUNIT_CASE(ttm_bo_validate_failed_alloc),
+ KUNIT_CASE(ttm_bo_validate_pinned),
+ KUNIT_CASE(ttm_bo_validate_busy_placement),
+ KUNIT_CASE_PARAM(ttm_bo_validate_multihop, ttm_bo_types_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_signaled,
+ ttm_bo_no_placement_gen_params),
+ KUNIT_CASE_PARAM(ttm_bo_validate_no_placement_not_signaled,
+ ttm_bo_types_gen_params),
+ KUNIT_CASE(ttm_bo_validate_move_fence_signaled),
+ KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled,
+ ttm_bo_validate_wait_gen_params),
+ KUNIT_CASE(ttm_bo_validate_swapout),
+ KUNIT_CASE(ttm_bo_validate_happy_evict),
+ KUNIT_CASE(ttm_bo_validate_all_pinned_evict),
+ KUNIT_CASE(ttm_bo_validate_allowed_only_evict),
+ KUNIT_CASE(ttm_bo_validate_deleted_evict),
+ KUNIT_CASE(ttm_bo_validate_busy_domain_evict),
+ KUNIT_CASE(ttm_bo_validate_evict_gutting),
+ KUNIT_CASE(ttm_bo_validate_recursive_evict),
+ {}
+};
+
+static struct kunit_suite ttm_bo_validate_test_suite = {
+ .name = "ttm_bo_validate",
+ .init = ttm_test_devices_all_init,
+ .exit = ttm_test_devices_fini,
+ .test_cases = ttm_bo_validate_test_cases,
+};
+
+kunit_test_suites(&ttm_bo_validate_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for ttm_bo APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index 19eaff22e6ae..1621903818e5 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -209,4 +209,5 @@ static struct kunit_suite ttm_device_test_suite = {
kunit_test_suites(&ttm_device_test_suite);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for ttm_device APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index 7b7c1fa805fc..b91c13f46225 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -6,8 +6,43 @@
#include "ttm_kunit_helpers.h"
-static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
+static const struct ttm_place sys_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock1_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK1,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static const struct ttm_place mock2_place = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .mem_type = TTM_PL_MOCK2,
+ .flags = TTM_PL_FLAG_FALLBACK,
+};
+
+static struct ttm_placement sys_placement = {
+ .num_placement = 1,
+ .placement = &sys_place,
+};
+
+static struct ttm_placement bad_placement = {
+ .num_placement = 1,
+ .placement = &mock1_place,
+};
+
+static struct ttm_placement mock_placement = {
+ .num_placement = 1,
+ .placement = &mock2_place,
+};
+
+static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, u32 page_flags)
{
struct ttm_tt *tt;
@@ -22,13 +57,84 @@ static void ttm_tt_simple_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
kfree(ttm);
}
-static void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+static int mock_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
+{
+ struct ttm_resource *old_mem = bo->resource;
+
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ return -EMULTIHOP;
+ }
+
+ if ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) ||
+ (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
+}
+
+static void mock_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
+ switch (bo->resource->mem_type) {
+ case TTM_PL_VRAM:
+ case TTM_PL_SYSTEM:
+ *placement = sys_placement;
+ break;
+ case TTM_PL_TT:
+ *placement = mock_placement;
+ break;
+ case TTM_PL_MOCK1:
+ /* Purge objects coming from this domain */
+ break;
+ }
+}
+
+static void bad_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = bad_placement;
+}
+
+static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32,
+ struct ttm_device_funcs *funcs)
+{
+ struct drm_device *drm = priv->drm;
+ int err;
+
+ err = ttm_device_init(ttm, funcs, drm->dev,
+ drm->anon_inode->i_mapping,
+ drm->vma_offset_manager,
+ use_dma_alloc, use_dma32);
+
+ return err;
}
struct ttm_device_funcs ttm_dev_funcs = {
.ttm_tt_create = ttm_tt_simple_create,
.ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = mock_evict_flags,
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
@@ -37,21 +143,34 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv,
bool use_dma_alloc,
bool use_dma32)
{
- struct drm_device *drm = priv->drm;
- int err;
+ return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
+ use_dma32, &ttm_dev_funcs);
+}
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
- err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
- drm->anon_inode->i_mapping,
- drm->vma_offset_manager,
- use_dma_alloc, use_dma32);
+struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
+ .ttm_tt_create = ttm_tt_simple_create,
+ .ttm_tt_destroy = ttm_tt_simple_destroy,
+ .move = mock_move,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bad_evict_flags,
+};
+EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
- return err;
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32)
+{
+ return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
+ use_dma32, &ttm_dev_funcs_bad_evict);
}
-EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
+EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
- size_t size)
+ size_t size,
+ struct dma_resv *obj)
{
struct drm_gem_object gem_obj = { };
struct ttm_buffer_object *bo;
@@ -61,6 +180,10 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
KUNIT_ASSERT_NOT_NULL(test, bo);
bo->base = gem_obj;
+
+ if (obj)
+ bo->base.resv = obj;
+
err = drm_gem_object_init(devs->drm, &bo->base, size);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -73,8 +196,7 @@ struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
-struct ttm_place *ttm_place_kunit_init(struct kunit *test,
- uint32_t mem_type, uint32_t flags)
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type, u32 flags)
{
struct ttm_place *place;
@@ -88,6 +210,12 @@ struct ttm_place *ttm_place_kunit_init(struct kunit *test,
}
EXPORT_SYMBOL_GPL(ttm_place_kunit_init);
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo)
+{
+ drm_gem_object_release(&bo->base);
+}
+EXPORT_SYMBOL_GPL(dummy_ttm_bo_destroy);
+
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
struct ttm_test_devices *devs;
@@ -98,6 +226,9 @@ struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
devs->dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
+ /* Set mask for alloc_coherent mappings to enable ttm_pool_alloc testing */
+ devs->dev->coherent_dma_mask = -1;
+
devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
sizeof(*devs->drm), 0,
DRIVER_GEM);
@@ -150,10 +281,25 @@ int ttm_test_devices_init(struct kunit *test)
}
EXPORT_SYMBOL_GPL(ttm_test_devices_init);
+int ttm_test_devices_all_init(struct kunit *test)
+{
+ struct ttm_test_devices *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv = ttm_test_devices_all(test);
+ test->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_test_devices_all_init);
+
void ttm_test_devices_fini(struct kunit *test)
{
ttm_test_devices_put(test, test->priv);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TTM KUnit test helper functions");
+MODULE_LICENSE("GPL and additional rights");
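As an aside, a minimal sketch of how a test could use the new dma_resv parameter of ttm_bo_kunit_init(); the test name and sizes below are illustrative assumptions, not part of this patch:

static void ttm_bo_shared_resv_sketch(struct kunit *test)
{
	struct ttm_test_devices *devs = test->priv;
	struct ttm_buffer_object *bo1, *bo2;
	struct dma_resv *resv;

	resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, resv);
	dma_resv_init(resv);

	/* Both mock BOs now share a single reservation object */
	bo1 = ttm_bo_kunit_init(test, devs, SZ_8K, resv);
	bo2 = ttm_bo_kunit_init(test, devs, SZ_8K, resv);

	KUNIT_EXPECT_PTR_EQ(test, bo1->base.resv, bo2->base.resv);
}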
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
index 2f51c833a536..c7da23232ffa 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -13,7 +13,11 @@
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
+#define TTM_PL_MOCK1 (TTM_PL_PRIV + 1)
+#define TTM_PL_MOCK2 (TTM_PL_PRIV + 2)
+
extern struct ttm_device_funcs ttm_dev_funcs;
+extern struct ttm_device_funcs ttm_dev_funcs_bad_evict;
struct ttm_test_devices {
struct drm_device *drm;
@@ -26,11 +30,17 @@ int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_device *ttm,
bool use_dma_alloc,
bool use_dma32);
+int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
+ struct ttm_device *ttm,
+ bool use_dma_alloc,
+ bool use_dma32);
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
- size_t size);
-struct ttm_place *ttm_place_kunit_init(struct kunit *test,
- uint32_t mem_type, uint32_t flags);
+ size_t size,
+ struct dma_resv *obj);
+struct ttm_place *ttm_place_kunit_init(struct kunit *test, u32 mem_type,
+ u32 flags);
+void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
@@ -39,6 +49,7 @@ void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
/* Generic init/fini for tests that only need DRM/TTM devices */
int ttm_test_devices_init(struct kunit *test);
+int ttm_test_devices_all_init(struct kunit *test);
void ttm_test_devices_fini(struct kunit *test);
#endif // TTM_KUNIT_HELPERS_H
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
new file mode 100644
index 000000000000..f6d1c8a2845d
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_mock_manager.h"
+
+static inline struct ttm_mock_manager *
+to_mock_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct ttm_mock_manager, man);
+}
+
+static inline struct ttm_mock_resource *
+to_mock_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_mock_resource, base);
+}
+
+static int ttm_mock_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res;
+ struct drm_buddy *mm = &manager->mm;
+ u64 lpfn, fpfn, alloc_size;
+ int err;
+
+ mock_res = kzalloc(sizeof(*mock_res), GFP_KERNEL);
+
+ if (!mock_res)
+ return -ENOMEM;
+
+ fpfn = 0;
+ lpfn = man->size;
+
+ ttm_resource_init(bo, place, &mock_res->base);
+ INIT_LIST_HEAD(&mock_res->blocks);
+
+ if (place->flags & TTM_PL_FLAG_TOPDOWN)
+ mock_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ mock_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
+ alloc_size = (u64)mock_res->base.size;
+ mutex_lock(&manager->lock);
+ err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size,
+ manager->default_page_size,
+ &mock_res->blocks,
+ mock_res->flags);
+
+ if (err)
+ goto error_free_blocks;
+ mutex_unlock(&manager->lock);
+
+ *res = &mock_res->base;
+ return 0;
+
+error_free_blocks:
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ ttm_resource_fini(man, &mock_res->base);
+ mutex_unlock(&manager->lock);
+
+ return err;
+}
+
+static void ttm_mock_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ struct ttm_mock_manager *manager = to_mock_mgr(man);
+ struct ttm_mock_resource *mock_res = to_mock_mgr_resource(res);
+ struct drm_buddy *mm = &manager->mm;
+
+ mutex_lock(&manager->lock);
+ drm_buddy_free_list(mm, &mock_res->blocks, 0);
+ mutex_unlock(&manager->lock);
+
+ ttm_resource_fini(man, res);
+ kfree(mock_res);
+}
+
+static const struct ttm_resource_manager_func ttm_mock_manager_funcs = {
+ .alloc = ttm_mock_manager_alloc,
+ .free = ttm_mock_manager_free,
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_mock_manager *manager;
+ struct ttm_resource_manager *base;
+ int err;
+
+ manager = kzalloc(sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return -ENOMEM;
+
+ mutex_init(&manager->lock);
+
+ err = drm_buddy_init(&manager->mm, size, PAGE_SIZE);
+
+ if (err) {
+ kfree(manager);
+ return err;
+ }
+
+ manager->default_page_size = PAGE_SIZE;
+ base = &manager->man;
+ base->func = &ttm_mock_manager_funcs;
+ base->use_tt = true;
+
+ ttm_resource_manager_init(base, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, base);
+ ttm_resource_manager_set_used(base, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_init);
+
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_mock_manager *mock_man;
+ int err;
+
+ man = ttm_manager_type(bdev, mem_type);
+ mock_man = to_mock_mgr(man);
+
+ err = ttm_resource_manager_evict_all(bdev, man);
+ if (err)
+ return;
+
+ ttm_resource_manager_set_used(man, false);
+
+ mutex_lock(&mock_man->lock);
+ drm_buddy_fini(&mock_man->mm);
+ mutex_unlock(&mock_man->lock);
+
+ ttm_set_driver_manager(bdev, mem_type, NULL);
+}
+EXPORT_SYMBOL_GPL(ttm_mock_manager_fini);
+
+static int ttm_bad_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -ENOSPC;
+}
+
+static int ttm_busy_manager_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ return -EBUSY;
+}
+
+static void ttm_bad_manager_free(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+}
+
+static bool ttm_bad_manager_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return true;
+}
+
+static const struct ttm_resource_manager_func ttm_bad_manager_funcs = {
+ .alloc = ttm_bad_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+static const struct ttm_resource_manager_func ttm_bad_busy_manager_funcs = {
+ .alloc = ttm_busy_manager_alloc,
+ .free = ttm_bad_manager_free,
+ .compatible = ttm_bad_manager_compatible
+};
+
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_resource_manager *man;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (!man)
+ return -ENOMEM;
+
+ man->func = &ttm_bad_manager_funcs;
+
+ ttm_resource_manager_init(man, bdev, size);
+ ttm_set_driver_manager(bdev, mem_type, man);
+ ttm_resource_manager_set_used(man, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_init);
+
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size)
+{
+ struct ttm_resource_manager *man;
+
+ ttm_bad_manager_init(bdev, mem_type, size);
+ man = ttm_manager_type(bdev, mem_type);
+
+ man->func = &ttm_bad_busy_manager_funcs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_busy_manager_init);
+
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type)
+{
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(bdev, mem_type);
+
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, mem_type, NULL);
+
+ kfree(man);
+}
+EXPORT_SYMBOL_GPL(ttm_bad_manager_fini);
+
+MODULE_DESCRIPTION("KUnit tests for ttm with mock resource managers");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
new file mode 100644
index 000000000000..e4c95f86a467
--- /dev/null
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+#ifndef TTM_MOCK_MANAGER_H
+#define TTM_MOCK_MANAGER_H
+
+#include <drm/drm_buddy.h>
+
+struct ttm_mock_manager {
+ struct ttm_resource_manager man;
+ struct drm_buddy mm;
+ u64 default_page_size;
+ /* protects allocations of mock buffer objects */
+ struct mutex lock;
+};
+
+struct ttm_mock_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+};
+
+int ttm_mock_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_bad_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+int ttm_busy_manager_init(struct ttm_device *bdev, u32 mem_type, u32 size);
+void ttm_mock_manager_fini(struct ttm_device *bdev, u32 mem_type);
+void ttm_bad_manager_fini(struct ttm_device *bdev, u32 mem_type);
+
+#endif // TTM_MOCK_MANAGER_H
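A hedged sketch of the intended consumption pattern for the mock manager; the test body, the manager size, and calling ttm_resource_alloc() from a test module are assumptions layered on top of this patch:

static void ttm_mock_manager_sketch(struct kunit *test)
{
	struct ttm_test_devices *devs = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_place *place;
	struct ttm_resource *res;
	int err;

	/* Back TTM_PL_MOCK1 with a drm_buddy-based mock manager */
	err = ttm_mock_manager_init(devs->ttm_dev, TTM_PL_MOCK1, SZ_1M);
	KUNIT_ASSERT_EQ(test, err, 0);

	bo = ttm_bo_kunit_init(test, devs, SZ_8K, NULL);
	place = ttm_place_kunit_init(test, TTM_PL_MOCK1, 0);

	/* Allocate and release a resource in the mock domain */
	err = ttm_resource_alloc(bo, place, &res);
	KUNIT_ASSERT_EQ(test, err, 0);
	ttm_resource_free(bo, &res);

	ttm_mock_manager_fini(devs->ttm_dev, TTM_PL_MOCK1);
}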
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 0a3fede84da9..8ade53371f72 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -48,7 +48,7 @@ static void ttm_pool_test_fini(struct kunit *test)
}
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
- uint32_t page_flags,
+ u32 page_flags,
enum ttm_caching caching,
size_t size)
{
@@ -57,7 +57,7 @@ static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
struct ttm_tt *tt;
int err;
- bo = ttm_bo_kunit_init(test, priv->devs, size);
+ bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
KUNIT_ASSERT_NOT_NULL(test, bo);
priv->mock_bo = bo;
@@ -209,7 +209,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, devs, size);
+ bo = ttm_bo_kunit_init(test, devs, size, NULL);
KUNIT_ASSERT_NOT_NULL(test, bo);
err = ttm_sg_tt_init(tt, bo, 0, caching);
@@ -433,4 +433,5 @@ static struct kunit_suite ttm_pool_test_suite = {
kunit_test_suites(&ttm_pool_test_suite);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for ttm_pool APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index 029e1f094bb0..22260e7aea58 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -11,8 +11,8 @@
struct ttm_resource_test_case {
const char *description;
- uint32_t mem_type;
- uint32_t flags;
+ u32 mem_type;
+ u32 flags;
};
struct ttm_resource_test_priv {
@@ -47,20 +47,20 @@ static void ttm_resource_test_fini(struct kunit *test)
static void ttm_init_test_mocks(struct kunit *test,
struct ttm_resource_test_priv *priv,
- uint32_t mem_type, uint32_t flags)
+ u32 mem_type, u32 flags)
{
size_t size = RES_SIZE;
/* Make sure we have what we need for a good BO mock */
KUNIT_ASSERT_NOT_NULL(test, priv->devs->ttm_dev);
- priv->bo = ttm_bo_kunit_init(test, priv->devs, size);
+ priv->bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
priv->place = ttm_place_kunit_init(test, mem_type, flags);
}
static void ttm_init_test_manager(struct kunit *test,
struct ttm_resource_test_priv *priv,
- uint32_t mem_type)
+ u32 mem_type)
{
struct ttm_device *ttm_dev = priv->devs->ttm_dev;
struct ttm_resource_manager *man;
@@ -112,7 +112,7 @@ static void ttm_resource_init_basic(struct kunit *test)
struct ttm_buffer_object *bo;
struct ttm_place *place;
struct ttm_resource_manager *man;
- uint64_t expected_usage;
+ u64 expected_usage;
ttm_init_test_mocks(test, priv, params->mem_type, params->flags);
bo = priv->bo;
@@ -198,7 +198,7 @@ static void ttm_resource_fini_basic(struct kunit *test)
ttm_resource_init(bo, place, res);
ttm_resource_fini(man, res);
- KUNIT_ASSERT_TRUE(test, list_empty(&res->lru));
+ KUNIT_ASSERT_TRUE(test, list_empty(&res->lru.link));
KUNIT_ASSERT_EQ(test, man->usage, 0);
}
@@ -230,7 +230,7 @@ static void ttm_resource_manager_usage_basic(struct kunit *test)
struct ttm_buffer_object *bo;
struct ttm_place *place;
struct ttm_resource_manager *man;
- uint64_t actual_usage;
+ u64 actual_usage;
ttm_init_test_mocks(test, priv, TTM_PL_SYSTEM, TTM_PL_FLAG_TOPDOWN);
bo = priv->bo;
@@ -268,7 +268,7 @@ static void ttm_sys_man_alloc_basic(struct kunit *test)
struct ttm_buffer_object *bo;
struct ttm_place *place;
struct ttm_resource *res;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
int ret;
ttm_init_test_mocks(test, priv, mem_type, 0);
@@ -293,7 +293,7 @@ static void ttm_sys_man_free_basic(struct kunit *test)
struct ttm_buffer_object *bo;
struct ttm_place *place;
struct ttm_resource *res;
- uint32_t mem_type = TTM_PL_SYSTEM;
+ u32 mem_type = TTM_PL_SYSTEM;
ttm_init_test_mocks(test, priv, mem_type, 0);
bo = priv->bo;
@@ -332,4 +332,5 @@ static struct kunit_suite ttm_resource_test_suite = {
kunit_test_suites(&ttm_resource_test_suite);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for ttm_resource and ttm_sys_man APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
index fd4502c18de6..61ec6f580b62 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_tt_test.c
@@ -11,23 +11,10 @@
struct ttm_tt_test_case {
const char *description;
- uint32_t size;
- uint32_t extra_pages_num;
+ u32 size;
+ u32 extra_pages_num;
};
-static int ttm_tt_test_init(struct kunit *test)
-{
- struct ttm_test_devices *priv;
-
- priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, priv);
-
- priv = ttm_test_devices_all(test);
- test->priv = priv;
-
- return 0;
-}
-
static const struct ttm_tt_test_case ttm_tt_init_basic_cases[] = {
{
.description = "Page-aligned size",
@@ -54,16 +41,16 @@ static void ttm_tt_init_basic(struct kunit *test)
const struct ttm_tt_test_case *params = test->param_value;
struct ttm_buffer_object *bo;
struct ttm_tt *tt;
- uint32_t page_flags = TTM_TT_FLAG_ZERO_ALLOC;
+ u32 page_flags = TTM_TT_FLAG_ZERO_ALLOC;
enum ttm_caching caching = ttm_cached;
- uint32_t extra_pages = params->extra_pages_num;
+ u32 extra_pages = params->extra_pages_num;
int num_pages = params->size >> PAGE_SHIFT;
int err;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, params->size);
+ bo = ttm_bo_kunit_init(test, test->priv, params->size, NULL);
err = ttm_tt_init(tt, bo, page_flags, caching, extra_pages);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -82,14 +69,14 @@ static void ttm_tt_init_misaligned(struct kunit *test)
struct ttm_buffer_object *bo;
struct ttm_tt *tt;
enum ttm_caching caching = ttm_cached;
- uint32_t size = SZ_8K;
+ u32 size = SZ_8K;
int num_pages = (size + SZ_4K) >> PAGE_SHIFT;
int err;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, size);
+ bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
/* Make the object size misaligned */
bo->base.size += 1;
@@ -110,7 +97,7 @@ static void ttm_tt_fini_basic(struct kunit *test)
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_tt_init(tt, bo, 0, caching, 0);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -130,7 +117,7 @@ static void ttm_tt_fini_sg(struct kunit *test)
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_sg_tt_init(tt, bo, 0, caching);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -151,7 +138,7 @@ static void ttm_tt_fini_shmem(struct kunit *test)
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_tt_init(tt, bo, 0, caching, 0);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -168,7 +155,7 @@ static void ttm_tt_create_basic(struct kunit *test)
struct ttm_buffer_object *bo;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_device;
dma_resv_lock(bo->base.resv, NULL);
@@ -187,7 +174,7 @@ static void ttm_tt_create_invalid_bo_type(struct kunit *test)
struct ttm_buffer_object *bo;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
bo->type = ttm_bo_type_sg + 1;
dma_resv_lock(bo->base.resv, NULL);
@@ -208,7 +195,7 @@ static void ttm_tt_create_ttm_exists(struct kunit *test)
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
err = ttm_tt_init(tt, bo, 0, caching, 0);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -224,7 +211,7 @@ static void ttm_tt_create_ttm_exists(struct kunit *test)
}
static struct ttm_tt *ttm_tt_null_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ u32 page_flags)
{
return NULL;
}
@@ -239,7 +226,7 @@ static void ttm_tt_create_failed(struct kunit *test)
struct ttm_buffer_object *bo;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
/* Update ttm_device_funcs so we don't alloc ttm_tt */
devs->ttm_dev->funcs = &ttm_dev_empty_funcs;
@@ -257,7 +244,7 @@ static void ttm_tt_destroy_basic(struct kunit *test)
struct ttm_buffer_object *bo;
int err;
- bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE);
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
dma_resv_lock(bo->base.resv, NULL);
err = ttm_tt_create(bo, false);
@@ -269,6 +256,120 @@ static void ttm_tt_destroy_basic(struct kunit *test)
ttm_tt_destroy(devs->ttm_dev, bo->ttm);
}
+static void ttm_tt_populate_null_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ int err;
+
+ err = ttm_tt_populate(devs->ttm_dev, NULL, &ctx);
+ KUNIT_ASSERT_EQ(test, err, -EINVAL);
+}
+
+static void ttm_tt_populate_populated_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ struct page *populated_page;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ populated_page = *tt->pages;
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_PTR_EQ(test, populated_page, *tt->pages);
+}
+
+static void ttm_tt_unpopulate_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ KUNIT_ASSERT_FALSE(test, ttm_tt_is_populated(tt));
+}
+
+static void ttm_tt_unpopulate_empty_ttm(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ ttm_tt_unpopulate(devs->ttm_dev, tt);
+ /* Expect graceful handling of unpopulated TTs */
+}
+
+static void ttm_tt_swapin_basic(struct kunit *test)
+{
+ const struct ttm_test_devices *devs = test->priv;
+ int expected_num_pages = BO_SIZE >> PAGE_SHIFT;
+ struct ttm_operation_ctx ctx = { };
+ struct ttm_buffer_object *bo;
+ struct ttm_tt *tt;
+ int err, num_pages;
+
+ bo = ttm_bo_kunit_init(test, test->priv, BO_SIZE, NULL);
+
+ tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, tt);
+
+ err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_populate(devs->ttm_dev, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_TRUE(test, ttm_tt_is_populated(tt));
+
+ num_pages = ttm_tt_swapout(devs->ttm_dev, tt, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, num_pages, expected_num_pages);
+ KUNIT_ASSERT_NOT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_TRUE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+
+ /* Swapout depopulates the TT, so allocate pages and then swap them back in */
+ err = ttm_pool_alloc(&devs->ttm_dev->pool, tt, &ctx);
+ KUNIT_ASSERT_EQ(test, err, 0);
+
+ err = ttm_tt_swapin(tt);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ KUNIT_ASSERT_NULL(test, tt->swap_storage);
+ KUNIT_ASSERT_FALSE(test, tt->page_flags & TTM_TT_FLAG_SWAPPED);
+}
+
static struct kunit_case ttm_tt_test_cases[] = {
KUNIT_CASE_PARAM(ttm_tt_init_basic, ttm_tt_init_basic_gen_params),
KUNIT_CASE(ttm_tt_init_misaligned),
@@ -280,16 +381,22 @@ static struct kunit_case ttm_tt_test_cases[] = {
KUNIT_CASE(ttm_tt_create_ttm_exists),
KUNIT_CASE(ttm_tt_create_failed),
KUNIT_CASE(ttm_tt_destroy_basic),
+ KUNIT_CASE(ttm_tt_populate_null_ttm),
+ KUNIT_CASE(ttm_tt_populate_populated_ttm),
+ KUNIT_CASE(ttm_tt_unpopulate_basic),
+ KUNIT_CASE(ttm_tt_unpopulate_empty_ttm),
+ KUNIT_CASE(ttm_tt_swapin_basic),
{}
};
static struct kunit_suite ttm_tt_test_suite = {
.name = "ttm_tt",
- .init = ttm_tt_test_init,
+ .init = ttm_test_devices_all_init,
.exit = ttm_test_devices_fini,
.test_cases = ttm_tt_test_cases,
};
kunit_test_suites(&ttm_tt_test_suite);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests for ttm_tt APIs");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6396dece0db1..320592435252 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -224,80 +224,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
dma_resv_iter_end(&cursor);
}
-/**
- * ttm_bo_cleanup_refs
- * If bo idle, remove from lru lists, and unref.
- * If not idle, block if possible.
- *
- * Must be called with lru_lock and reservation held, this function
- * will drop the lru lock and optionally the reservation lock before returning.
- *
- * @bo: The buffer object to clean-up
- * @interruptible: Any sleeps should occur interruptibly.
- * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
- * @unlock_resv: Unlock the reservation lock as well.
- */
-
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
- bool unlock_resv)
-{
- struct dma_resv *resv = &bo->base._resv;
- int ret;
-
- if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
- ret = 0;
- else
- ret = -EBUSY;
-
- if (ret && !no_wait_gpu) {
- long lret;
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
-
- lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible,
- 30 * HZ);
-
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
-
- spin_lock(&bo->bdev->lru_lock);
- if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
- /*
- * We raced, and lost, someone else holds the reservation now,
- * and is probably busy in ttm_bo_cleanup_memtype_use.
- *
- * Even if it's not the case, because we finished waiting any
- * delayed destruction would succeed, so just return success
- * here.
- */
- spin_unlock(&bo->bdev->lru_lock);
- return 0;
- }
- ret = 0;
- }
-
- if (ret) {
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
- spin_unlock(&bo->bdev->lru_lock);
- return ret;
- }
-
- spin_unlock(&bo->bdev->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
-
- if (unlock_resv)
- dma_resv_unlock(bo->base.resv);
-
- return 0;
-}
-
/*
* Block for the dma_resv object to become idle, lock the buffer and clean up
* the resource and tt object.
@@ -346,6 +272,7 @@ static void ttm_bo_release(struct kref *kref)
if (!dma_resv_test_signaled(bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP) ||
(want_init_on_free() && (bo->ttm != NULL)) ||
+ bo->type == ttm_bo_type_sg ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -505,150 +432,152 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
-/*
- * Check the target bo is allowable to be evicted or swapout, including cases:
- *
- * a. if share same reservation object with ctx->resv, have assumption
- * reservation objects should already be locked, so not lock again and
- * return true directly when either the opreation allow_reserved_eviction
- * or the target bo already is in delayed free list;
+/**
+ * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
+ * @bdev: The ttm device.
+ * @man: The manager whose bo to evict.
+ * @ctx: The TTM operation ctx governing the eviction.
*
- * b. Otherwise, trylock it.
+ * Return: 0 if successful or if the resource disappeared. Negative error code on error.
*/
-static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- const struct ttm_place *place,
- bool *locked, bool *busy)
+int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx)
{
- bool ret = false;
+ struct ttm_resource_cursor cursor;
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *res;
+ unsigned int mem_type;
+ int ret = 0;
- if (bo->pin_count) {
- *locked = false;
- if (busy)
- *busy = false;
- return false;
+ spin_lock(&bdev->lru_lock);
+ res = ttm_resource_manager_first(man, &cursor);
+ ttm_resource_cursor_fini(&cursor);
+ if (!res) {
+ ret = -ENOENT;
+ goto out_no_ref;
}
+ bo = res->bo;
+ if (!ttm_bo_get_unless_zero(bo))
+ goto out_no_ref;
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
+ if (ret)
+ goto out_no_lock;
+ if (!bo->resource || bo->resource->mem_type != mem_type)
+ goto out_bo_moved;
- if (bo->base.resv == ctx->resv) {
- dma_resv_assert_held(bo->base.resv);
- if (ctx->allow_res_evict)
- ret = true;
- *locked = false;
- if (busy)
- *busy = false;
+ if (bo->deleted) {
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (!ret)
+ ttm_bo_cleanup_memtype_use(bo);
} else {
- ret = dma_resv_trylock(bo->base.resv);
- *locked = ret;
- if (busy)
- *busy = !ret;
- }
-
- if (ret && place && (bo->resource->mem_type != place->mem_type ||
- !bo->bdev->funcs->eviction_valuable(bo, place))) {
- ret = false;
- if (*locked) {
- dma_resv_unlock(bo->base.resv);
- *locked = false;
- }
+ ret = ttm_bo_evict(bo, ctx);
}
+out_bo_moved:
+ dma_resv_unlock(bo->base.resv);
+out_no_lock:
+ ttm_bo_put(bo);
+ return ret;
+out_no_ref:
+ spin_unlock(&bdev->lru_lock);
return ret;
}
/**
- * ttm_mem_evict_wait_busy - wait for a busy BO to become available
- *
- * @busy_bo: BO which couldn't be locked with trylock
- * @ctx: operation context
- * @ticket: acquire ticket
- *
- * Try to lock a busy buffer object to avoid failing eviction.
+ * struct ttm_bo_evict_walk - Parameters for the evict walk.
*/
-static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
+struct ttm_bo_evict_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @place: The place passed to the resource allocation. */
+ const struct ttm_place *place;
+ /** @evictor: The buffer object we're trying to make room for. */
+ struct ttm_buffer_object *evictor;
+ /** @res: The allocated resource if any. */
+ struct ttm_resource **res;
+ /** @evicted: Number of successful evictions. */
+ unsigned long evicted;
+};
+
+static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- int r;
-
- if (!busy_bo || !ticket)
- return -EBUSY;
-
- if (ctx->interruptible)
- r = dma_resv_lock_interruptible(busy_bo->base.resv,
- ticket);
- else
- r = dma_resv_lock(busy_bo->base.resv, ticket);
-
- /*
- * TODO: It would be better to keep the BO locked until allocation is at
- * least tried one more time, but that would mean a much larger rework
- * of TTM.
- */
- if (!r)
- dma_resv_unlock(busy_bo->base.resv);
-
- return r == -EDEADLK ? -EBUSY : r;
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket)
-{
- struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_resource_cursor cursor;
- struct ttm_resource *res;
- bool locked = false;
- int ret;
+ struct ttm_bo_evict_walk *evict_walk =
+ container_of(walk, typeof(*evict_walk), walk);
+ s64 lret;
- spin_lock(&bdev->lru_lock);
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- bool busy;
-
- if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
- &locked, &busy)) {
- if (busy && !busy_bo && ticket !=
- dma_resv_locking_ctx(res->bo->base.resv))
- busy_bo = res->bo;
- continue;
- }
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
+ return 0;
- if (ttm_bo_get_unless_zero(res->bo)) {
- bo = res->bo;
- break;
- }
- if (locked)
- dma_resv_unlock(res->bo->base.resv);
+ if (bo->deleted) {
+ lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ if (!lret)
+ ttm_bo_cleanup_memtype_use(bo);
+ } else {
+ lret = ttm_bo_evict(bo, walk->ctx);
}
- if (!bo) {
- if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
- busy_bo = NULL;
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
- if (busy_bo)
- ttm_bo_put(busy_bo);
- return ret;
- }
+ if (lret)
+ goto out;
- if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
- ctx->no_wait_gpu, locked);
- ttm_bo_put(bo);
- return ret;
- }
+ evict_walk->evicted++;
+ if (evict_walk->res)
+ lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
+ evict_walk->res);
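+ /* The allocation succeeded, so report progress and let the walk stop */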
+ if (lret == 0)
+ return 1;
+out:
+ /* Errors that should terminate the walk. */
+ if (lret == -ENOSPC)
+ return -EBUSY;
- spin_unlock(&bdev->lru_lock);
+ return lret;
+}
- ret = ttm_bo_evict(bo, ctx);
- if (locked)
- ttm_bo_unreserve(bo);
- else
- ttm_bo_move_to_lru_tail_unlocked(bo);
+static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
+ .process_bo = ttm_bo_evict_cb,
+};
+
+static int ttm_bo_evict_alloc(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ const struct ttm_place *place,
+ struct ttm_buffer_object *evictor,
+ struct ttm_operation_ctx *ctx,
+ struct ww_acquire_ctx *ticket,
+ struct ttm_resource **res)
+{
+ struct ttm_bo_evict_walk evict_walk = {
+ .walk = {
+ .ops = &ttm_evict_walk_ops,
+ .ctx = ctx,
+ .ticket = ticket,
+ },
+ .place = place,
+ .evictor = evictor,
+ .res = res,
+ };
+ s64 lret;
+
+ evict_walk.walk.trylock_only = true;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ if (lret || !ticket)
+ goto out;
- ttm_bo_put(bo);
- return ret;
+ /* If ticket-locking, repeat while making progress. */
+ evict_walk.walk.trylock_only = false;
+ do {
+ /* The walk may clear the evict_walk.walk.ticket field */
+ evict_walk.walk.ticket = ticket;
+ evict_walk.evicted = 0;
+ lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
+ } while (!lret && evict_walk.evicted);
+out:
+ if (lret < 0)
+ return lret;
+ if (lret == 0)
+ return -EBUSY;
+ return 0;
}
/**
@@ -759,6 +688,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
+ bool may_evict;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
@@ -768,22 +698,21 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
TTM_PL_FLAG_FALLBACK))
continue;
- do {
- ret = ttm_resource_alloc(bo, place, res);
- if (unlikely(ret && ret != -ENOSPC))
+ may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
+ ret = ttm_resource_alloc(bo, place, res);
+ if (ret) {
+ if (ret != -ENOSPC)
return ret;
- if (likely(!ret) || !force_space)
- break;
-
- ret = ttm_mem_evict_first(bdev, man, place, ctx,
- ticket);
- if (unlikely(ret == -EBUSY))
- break;
- if (unlikely(ret))
+ if (!may_evict)
+ continue;
+
+ ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
+ ticket, res);
+ if (ret == -EBUSY)
+ continue;
+ if (ret)
return ret;
- } while (1);
- if (ret)
- continue;
+ }
ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
@@ -1117,12 +1046,24 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags)
+/**
+ * struct ttm_bo_swapout_walk - Parameters for the swapout walk
+ */
+struct ttm_bo_swapout_walk {
+ /** @walk: The walk base parameters. */
+ struct ttm_lru_walk walk;
+ /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
+ gfp_t gfp_flags;
+};
+
+static s64
+ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place;
- bool locked;
- long ret;
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_bo_swapout_walk *swapout_walk =
+ container_of(walk, typeof(*swapout_walk), walk);
+ struct ttm_operation_ctx *ctx = walk->ctx;
+ s64 ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1130,28 +1071,29 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- memset(&place, 0, sizeof(place));
- place.mem_type = bo->resource->mem_type;
- if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
- return -EBUSY;
+ if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ ret = -EBUSY;
+ goto out;
+ }
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
- !ttm_bo_get_unless_zero(bo)) {
- if (locked)
- dma_resv_unlock(bo->base.resv);
- return -EBUSY;
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ ret = -EBUSY;
+ goto out;
}
if (bo->deleted) {
- ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
- }
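+ /*
+ * A deleted bo only needs to be idled and have its memory
+ * released; its whole tt size then counts as swapout progress.
+ */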
+ pgoff_t num_pages = bo->ttm->num_pages;
- /* TODO: Cleanup the locking */
- spin_unlock(&bo->bdev->lru_lock);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ ttm_bo_cleanup_memtype_use(bo);
+ ret = num_pages;
+ goto out;
+ }
/*
* Move to system cached
@@ -1163,12 +1105,13 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
memset(&hop, 0, sizeof(hop));
place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
- if (unlikely(ret))
+ if (ret)
goto out;
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
- if (unlikely(ret != 0)) {
- WARN(ret == -EMULTIHOP, "Unexpected multihop in swaput - likely driver bug.\n");
+ if (ret) {
+ WARN(ret == -EMULTIHOP,
+ "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
@@ -1178,30 +1121,54 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* Make sure BO is idle.
*/
ret = ttm_bo_wait_ctx(bo, ctx);
- if (unlikely(ret != 0))
+ if (ret)
goto out;
ttm_bo_unmap_virtual(bo);
-
- /*
- * Swap out. Buffer will be swapped in again as soon as
- * anyone tries to access a ttm page.
- */
if (bo->bdev->funcs->swap_notify)
bo->bdev->funcs->swap_notify(bo);
if (ttm_tt_is_populated(bo->ttm))
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+
out:
+ /* Consider -ENOMEM and -ENOSPC non-fatal. */
+ if (ret == -ENOMEM || ret == -ENOSPC)
+ ret = -EBUSY;
- /*
- * Unreserve without putting on LRU to avoid swapping out an
- * already swapped buffer.
- */
- if (locked)
- dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
- return ret == -EBUSY ? -ENOSPC : ret;
+ return ret;
+}
+
+const struct ttm_lru_walk_ops ttm_swap_ops = {
+ .process_bo = ttm_bo_swapout_cb,
+};
+
+/**
+ * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
+ * @bdev: The ttm device.
+ * @ctx: The ttm_operation_ctx governing the swapout operation.
+ * @man: The resource manager whose resources / buffer objects are
+ * going to be swapped out.
+ * @gfp_flags: The gfp flags used for shmem page allocations.
+ * @target: The desired number of bytes to swap out.
+ *
+ * Return: The number of bytes actually swapped out, or negative error code
+ * on error.
+ */
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target)
+{
+ struct ttm_bo_swapout_walk swapout_walk = {
+ .walk = {
+ .ops = &ttm_swap_ops,
+ .ctx = ctx,
+ .trylock_only = true,
+ },
+ .gfp_flags = gfp_flags,
+ };
+
+ return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
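For orientation, a sketch of how a driver-side shrinker might drive the reworked, target-based ttm_bo_swapout(); the helper name and policy below are assumptions, not part of this patch:

static s64 example_try_swapout(struct ttm_device *bdev, s64 target)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	struct ttm_resource_manager *man;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	if (!man)
		return 0;

	/* Positive return is the progress made, negative is an error */
	return ttm_bo_swapout(bdev, &ctx, man, GFP_KERNEL, target);
}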
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0b3f4267130c..3c07f4712d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -768,3 +768,154 @@ error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
return ret;
}
+
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct ttm_operation_ctx *ctx = walk->ctx;
+
+ *needs_unlock = false;
+
+ if (dma_resv_trylock(bo->base.resv)) {
+ *needs_unlock = true;
+ return true;
+ }
+
+ if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
+ dma_resv_assert_held(bo->base.resv);
+ return true;
+ }
+
+ return false;
+}
+
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+ struct ttm_buffer_object *bo,
+ bool *needs_unlock)
+{
+ struct dma_resv *resv = bo->base.resv;
+ int ret;
+
+ if (walk->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ else
+ ret = dma_resv_lock(resv, walk->ticket);
+
+ if (!ret) {
+ *needs_unlock = true;
+ /*
+ * Only a single ticketlock per loop. Ticketlocks are prone
+ * to return -EDEADLK causing the eviction to fail, so
+ * after waiting for the ticketlock, revert to
+ * trylocking for this walk.
+ */
+ walk->ticket = NULL;
+ } else if (ret == -EDEADLK) {
+ /* Caller needs to exit the ww transaction. */
+ ret = -ENOSPC;
+ }
+
+ return ret;
+}
+
+static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
+{
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
+ * valid items.
+ * @walk: describe the walks and actions taken
+ * @bdev: The TTM device.
+ * @man: The struct ttm_resource manager whose LRU lists we're walking.
+ * @target: The end condition for the walk.
+ *
+ * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
+ * the corresponding ttm_buffer_object is locked and a reference is taken on it, and
+ * the LRU lock is dropped. The LRU lock may be dropped before locking and, in
+ * that case, it's verified that the item actually remains on the LRU list after
+ * the lock, and that the buffer object didn't switch resource in between.
+ *
+ * With a locked object, the actions indicated by @walk->process_bo are
+ * performed, and after that, the bo is unlocked, the refcount dropped and the
+ * next struct ttm_resource is processed. Here, the walker relies on
+ * TTM's restartable LRU list implementation.
+ *
+ * Typically @walk->process_bo() would return the number of pages evicted,
+ * swapped or shrunken, so that when the total exceeds @target, or when the
+ * LRU list has been walked in full, iteration is terminated. It's also terminated
+ * on error. Note that the definition of @target is done by the caller; it
+ * could have a different meaning than the number of pages.
+ *
+ * Note that, given the way dma_resv individualization is done, locking must be
+ * done either with the LRU lock held (trylocking only) or with a reference held
+ * on the object.
+ *
+ * Return: The progress made towards target or negative error code on error.
+ */
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target)
+{
+ struct ttm_resource_cursor cursor;
+ struct ttm_resource *res;
+ s64 progress = 0;
+ s64 lret;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_resource_manager_for_each_res(man, &cursor, res) {
+ struct ttm_buffer_object *bo = res->bo;
+ bool bo_needs_unlock = false;
+ bool bo_locked = false;
+ int mem_type;
+
+ /*
+ * Attempt a trylock before taking a reference on the bo,
+ * since if we do it the other way around, and the trylock fails,
+ * we need to drop the lru lock to put the bo.
+ */
+ if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
+ bo_locked = true;
+ else if (!walk->ticket || walk->ctx->no_wait_gpu ||
+ walk->trylock_only)
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ continue;
+ }
+
+ mem_type = res->mem_type;
+ spin_unlock(&bdev->lru_lock);
+
+ lret = 0;
+ if (!bo_locked)
+ lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+
+ /*
+ * Note that in between the release of the lru lock and the
+ * ticketlock, the bo may have switched resource,
+ * and also memory type, since the resource may have been
+ * freed and allocated again with a different memory type.
+ * In that case, just skip it.
+ */
+ if (!lret && bo->resource && bo->resource->mem_type == mem_type)
+ lret = walk->ops->process_bo(walk, bo);
+
+ ttm_lru_walk_unlock(bo, bo_needs_unlock);
+ ttm_bo_put(bo);
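+ /* A busy or already-handled bo counts as skipped, not as an error */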
+ if (lret == -EBUSY || lret == -EALREADY)
+ lret = 0;
+ progress = (lret < 0) ? lret : progress + lret;
+
+ spin_lock(&bdev->lru_lock);
+ if (progress < 0 || progress >= target)
+ break;
+ }
+ ttm_resource_cursor_fini(&cursor);
+ spin_unlock(&bdev->lru_lock);
+
+ return progress;
+}
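To illustrate the contract, a minimal walk built on the new helper, mirroring the swapout walk above; the callback and its accounting are hypothetical. process_bo() runs with the bo reserved and referenced, and its positive return values accumulate towards the target:

static s64 example_process_bo(struct ttm_lru_walk *walk,
			      struct ttm_buffer_object *bo)
{
	/* Skip pinned objects; count one unit of progress otherwise */
	if (bo->pin_count)
		return 0;

	return 1;
}

static const struct ttm_lru_walk_ops example_walk_ops = {
	.process_bo = example_process_bo,
};

static s64 example_walk(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_lru_walk walk = {
		.ops = &example_walk_ops,
		.ctx = ctx,
		.trylock_only = true,	/* never sleep on a reservation */
	};

	/* Stop after 16 units of progress, a full list walk, or an error */
	return ttm_lru_walk_for_evict(&walk, bdev, man, 16);
}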
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 434cf0258000..e7cc4954c1bc 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,35 +148,20 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_resource_cursor cursor;
struct ttm_resource_manager *man;
- struct ttm_resource *res;
unsigned i;
- int ret;
+ s64 lret;
- spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- ttm_resource_manager_for_each_res(man, &cursor, res) {
- struct ttm_buffer_object *bo = res->bo;
- uint32_t num_pages;
-
- if (!bo || bo->resource != res)
- continue;
-
- num_pages = PFN_UP(bo->base.size);
- ret = ttm_bo_swapout(bo, ctx, gfp_flags);
- /* ttm_bo_swapout has dropped the lru_lock */
- if (!ret)
- return num_pages;
- if (ret != -EBUSY)
- return ret;
- }
+ lret = ttm_bo_swapout(bdev, ctx, man, gfp_flags, 1);
+ /* Can be either positive (num_pages swapped out) or negative (an error) */
+ if (lret)
+ return lret;
}
- spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
@@ -274,14 +259,14 @@ static void ttm_device_clear_lru_dma_mappings(struct ttm_device *bdev,
struct ttm_resource *res;
spin_lock(&bdev->lru_lock);
- while ((res = list_first_entry_or_null(list, typeof(*res), lru))) {
+ while ((res = ttm_lru_first_res_or_null(list))) {
struct ttm_buffer_object *bo = res->bo;
/* Take ref against racing releases once lru_lock is unlocked */
if (!ttm_bo_get_unless_zero(bo))
continue;
- list_del_init(&res->lru);
+ list_del_init(&bo->resource->lru.link);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
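A sketch of a consumer loop over the reworked return convention; the helper below is hypothetical. The caller keeps going until the device reports no further progress or an error:

static int example_swapout_all(struct ttm_device *bdev)
{
	struct ttm_operation_ctx ctx = { };
	int ret;

	do {
		/* > 0: pages swapped, 0: nothing left, < 0: error */
		ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
	} while (ret > 0);

	return ret;
}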
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 6e1fd6985ffc..8504dbe19c1a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -91,7 +91,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
*/
if (order)
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
- __GFP_KSWAPD_RECLAIM;
+ __GFP_THISNODE;
if (!pool->use_dma_alloc) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 4a66b851b67d..6d764ba88aab 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -33,6 +33,68 @@
#include <drm/drm_util.h>
+/* Detach the cursor from the bulk move list */
+static void
+ttm_resource_cursor_clear_bulk(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ cursor->bulk = NULL;
+ list_del_init(&cursor->bulk_link);
+}
+
+/* Move the cursor to the end of the bulk move list it's in */
+static void ttm_resource_cursor_move_bulk_tail(struct ttm_lru_bulk_move *bulk,
+ struct ttm_resource_cursor *cursor)
+{
+ struct ttm_lru_bulk_move_pos *pos;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+
+ if (WARN_ON_ONCE(bulk != cursor->bulk)) {
+ list_del_init(&cursor->bulk_link);
+ return;
+ }
+
+ pos = &bulk->pos[cursor->mem_type][cursor->priority];
+ if (pos->last)
+ list_move(&cursor->hitch.link, &pos->last->lru.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/* Move all cursors attached to a bulk move to its end */
+static void ttm_bulk_move_adjust_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_move_bulk_tail(bulk, cursor);
+}
+
+/* Remove a cursor from an empty bulk move list */
+static void ttm_bulk_move_drop_cursors(struct ttm_lru_bulk_move *bulk)
+{
+ struct ttm_resource_cursor *cursor, *next;
+
+ list_for_each_entry_safe(cursor, next, &bulk->cursor_list, bulk_link)
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
+/**
+ * ttm_resource_cursor_fini() - Finalize the LRU list cursor usage
+ * @cursor: The struct ttm_resource_cursor to finalize.
+ *
+ * The function pulls the LRU list cursor off any lists it was previously
+ * attached to. Needs to be called with the LRU lock held. The function
+ * can be called multiple times in a row.
+ */
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor)
+{
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ list_del_init(&cursor->hitch.link);
+ ttm_resource_cursor_clear_bulk(cursor);
+}
+
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
* @bulk: the structure to init
@@ -42,10 +104,28 @@
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk)
{
memset(bulk, 0, sizeof(*bulk));
+ INIT_LIST_HEAD(&bulk->cursor_list);
}
EXPORT_SYMBOL(ttm_lru_bulk_move_init);
/**
+ * ttm_lru_bulk_move_fini - finalize a bulk move structure
+ * @bdev: The struct ttm_device
+ * @bulk: the structure to finalize
+ *
+ * Sanity checks that bulk moves don't have any
+ * resources left and hence no cursors attached.
+ */
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk)
+{
+ spin_lock(&bdev->lru_lock);
+ ttm_bulk_move_drop_cursors(bulk);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_lru_bulk_move_fini);
+
+/**
* ttm_lru_bulk_move_tail - bulk move range of resources to the LRU tail.
*
* @bulk: bulk move structure
@@ -57,6 +137,7 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
+ ttm_bulk_move_adjust_cursors(bulk);
for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
@@ -70,8 +151,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->last->bo->base.resv);
man = ttm_manager_type(pos->first->bo->bdev, i);
- list_bulk_move_tail(&man->lru[j], &pos->first->lru,
- &pos->last->lru);
+ list_bulk_move_tail(&man->lru[j], &pos->first->lru.link,
+ &pos->last->lru.link);
}
}
}
@@ -84,14 +165,38 @@ ttm_lru_bulk_move_pos(struct ttm_lru_bulk_move *bulk, struct ttm_resource *res)
return &bulk->pos[res->mem_type][res->bo->priority];
}
+/* Return the previous resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_prev_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_prev_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
+/* Return the next resource on the list (skip over non-resource list items) */
+static struct ttm_resource *ttm_lru_next_res(struct ttm_resource *cur)
+{
+ struct ttm_lru_item *lru = &cur->lru;
+
+ do {
+ lru = list_next_entry(lru, link);
+ } while (!ttm_lru_item_is_res(lru));
+
+ return ttm_lru_item_to_res(lru);
+}
+
/* Move the resource to the tail of the bulk move range */
static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
struct ttm_resource *res)
{
if (pos->last != res) {
if (pos->first == res)
- pos->first = list_next_entry(res, lru);
- list_move(&res->lru, &pos->last->lru);
+ pos->first = ttm_lru_next_res(res);
+ list_move(&res->lru.link, &pos->last->lru.link);
pos->last = res;
}
}
@@ -122,11 +227,11 @@ static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
pos->first = NULL;
pos->last = NULL;
} else if (pos->first == res) {
- pos->first = list_next_entry(res, lru);
+ pos->first = ttm_lru_next_res(res);
} else if (pos->last == res) {
- pos->last = list_prev_entry(res, lru);
+ pos->last = ttm_lru_prev_res(res);
} else {
- list_move(&res->lru, &pos->last->lru);
+ list_move(&res->lru.link, &pos->last->lru.link);
}
}
@@ -155,7 +260,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
lockdep_assert_held(&bo->bdev->lru_lock);
if (bo->pin_count) {
- list_move_tail(&res->lru, &bdev->pinned);
+ list_move_tail(&res->lru.link, &bdev->pinned);
} else if (bo->bulk_move) {
struct ttm_lru_bulk_move_pos *pos =
@@ -166,7 +271,7 @@ void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, res->mem_type);
- list_move_tail(&res->lru, &man->lru[bo->priority]);
+ list_move_tail(&res->lru.link, &man->lru[bo->priority]);
}
}
@@ -197,9 +302,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
man = ttm_manager_type(bo->bdev, place->mem_type);
spin_lock(&bo->bdev->lru_lock);
if (bo->pin_count)
- list_add_tail(&res->lru, &bo->bdev->pinned);
+ list_add_tail(&res->lru.link, &bo->bdev->pinned);
else
- list_add_tail(&res->lru, &man->lru[bo->priority]);
+ list_add_tail(&res->lru.link, &man->lru[bo->priority]);
man->usage += res->size;
spin_unlock(&bo->bdev->lru_lock);
}
@@ -221,7 +326,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
struct ttm_device *bdev = man->bdev;
spin_lock(&bdev->lru_lock);
- list_del_init(&res->lru);
+ list_del_init(&res->lru.link);
man->usage -= res->size;
spin_unlock(&bdev->lru_lock);
}
@@ -390,24 +495,11 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
};
struct dma_fence *fence;
int ret;
- unsigned i;
- /*
- * Can't use standard list traversal since we're unlocking.
- */
-
- spin_lock(&bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
- while (!list_empty(&man->lru[i])) {
- spin_unlock(&bdev->lru_lock);
- ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
- NULL);
- if (ret)
- return ret;
- spin_lock(&bdev->lru_lock);
- }
- }
- spin_unlock(&bdev->lru_lock);
+ do {
+ ret = ttm_bo_evict_first(bdev, man, &ctx);
+ cond_resched();
+ } while (!ret);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
@@ -460,53 +552,106 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
+static void
+ttm_resource_cursor_check_bulk(struct ttm_resource_cursor *cursor,
+ struct ttm_lru_item *next_lru)
+{
+ struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
+ struct ttm_lru_bulk_move *bulk = NULL;
+ struct ttm_buffer_object *bo = next->bo;
+
+ lockdep_assert_held(&cursor->man->bdev->lru_lock);
+ bulk = bo->bulk_move;
+
+ if (cursor->bulk != bulk) {
+ if (bulk) {
+ list_move_tail(&cursor->bulk_link, &bulk->cursor_list);
+ cursor->mem_type = next->mem_type;
+ } else {
+ list_del_init(&cursor->bulk_link);
+ }
+ cursor->bulk = bulk;
+ }
+}
+
/**
- * ttm_resource_manager_first
- *
+ * ttm_resource_manager_first() - Start iterating over the resources
+ * of a resource manager
* @man: resource manager to iterate over
* @cursor: cursor to record the position
*
- * Returns the first resource from the resource manager.
+ * Initializes the cursor and starts iterating. When done iterating,
+ * the caller must explicitly call ttm_resource_cursor_fini().
+ *
+ * Return: The first resource from the resource manager.
*/
struct ttm_resource *
ttm_resource_manager_first(struct ttm_resource_manager *man,
struct ttm_resource_cursor *cursor)
{
- struct ttm_resource *res;
-
lockdep_assert_held(&man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ cursor->priority = 0;
+ cursor->man = man;
+ ttm_lru_item_init(&cursor->hitch, TTM_LRU_HITCH);
+ INIT_LIST_HEAD(&cursor->bulk_link);
+ list_add(&cursor->hitch.link, &man->lru[cursor->priority]);
- return NULL;
+ return ttm_resource_manager_next(cursor);
}
/**
- * ttm_resource_manager_next
- *
- * @man: resource manager to iterate over
+ * ttm_resource_manager_next() - Continue iterating over the resource manager
+ * resources
* @cursor: cursor to record the position
- * @res: the current resource pointer
*
- * Returns the next resource from the resource manager.
+ * Return: the next resource from the resource manager.
*/
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res)
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor)
{
+ struct ttm_resource_manager *man = cursor->man;
+ struct ttm_lru_item *lru;
+
lockdep_assert_held(&man->bdev->lru_lock);
- list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
- return res;
+ for (;;) {
+ lru = &cursor->hitch;
+ list_for_each_entry_continue(lru, &man->lru[cursor->priority], link) {
+ if (ttm_lru_item_is_res(lru)) {
+ ttm_resource_cursor_check_bulk(cursor, lru);
+ list_move(&cursor->hitch.link, &lru->link);
+ return ttm_lru_item_to_res(lru);
+ }
+ }
+
+ if (++cursor->priority >= TTM_MAX_BO_PRIORITY)
+ break;
+
+ list_move(&cursor->hitch.link, &man->lru[cursor->priority]);
+ ttm_resource_cursor_clear_bulk(cursor);
+ }
+
+ ttm_resource_cursor_fini(cursor);
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
- ++cursor->priority)
- list_for_each_entry(res, &man->lru[cursor->priority], lru)
- return res;
+ return NULL;
+}
+
+/**
+ * ttm_lru_first_res_or_null() - Return the first resource on an lru list
+ * @head: The list head of the lru list.
+ *
+ * Return: Pointer to the first resource on the lru list or NULL if
+ * there is none.
+ */
+struct ttm_resource *ttm_lru_first_res_or_null(struct list_head *head)
+{
+ struct ttm_lru_item *lru;
+
+ list_for_each_entry(lru, head, link) {
+ if (ttm_lru_item_is_res(lru))
+ return ttm_lru_item_to_res(lru);
+ }
return NULL;
}
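A sketch of the resulting iteration contract for callers (the counting helper is hypothetical): the lru_lock is held across the walk and the cursor is finalized explicitly, exactly as ttm_lru_walk_for_evict() does:

static unsigned int example_count_resources(struct ttm_device *bdev,
					    struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	unsigned int count = 0;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res)
		count++;
	ttm_resource_cursor_fini(&cursor);
	spin_unlock(&bdev->lru_lock);

	return count;
}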
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 474fe7aad2a0..4b51b9023126 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -93,7 +93,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
*/
if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
page_flags |= TTM_TT_FLAG_DECRYPTED;
- drm_info(ddev, "TT memory decryption enabled.");
+ drm_info_once(ddev, "TT memory decryption enabled.");
}
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
@@ -251,6 +251,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapin);
/**
* ttm_tt_swapout - swap out tt object
@@ -308,6 +309,7 @@ out_err:
return ret;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_swapout);
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -386,6 +388,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(ttm_tt_unpopulate);
#ifdef CONFIG_DEBUG_FS