Diffstat (limited to 'drivers/gpu/drm/drm_gpuvm.c')
| -rw-r--r-- | drivers/gpu/drm/drm_gpuvm.c | 394 |
1 file changed, 330 insertions, 64 deletions
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..d6bea8a4fffd 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -27,6 +27,7 @@
 
 #include <drm/drm_gpuvm.h>
 
+#include <linux/export.h>
 #include <linux/interval_tree_generic.h>
 #include <linux/mm.h>
 
@@ -420,6 +421,71 @@
  */
 
 /**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
+ *    the existing mapping at the start and end boundaries and inserts a new map.
+ *
+ *    ::
+ *
+ *            a    start       end     b
+ *       pre: |-----------------------|
+ *                    drm_gpuva1
+ *
+ *            a    start       end     b
+ *       new: |-----|=========|-------|
+ *            remap     map     remap
+ *
+ *    one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ *    across boundaries, remapping the start and end segments, and inserting two
+ *    map operations to cover the full range.
+ *
+ *    ::
+ *
+ *            a     start        b              c       end          d
+ *       pre: |------------------|--------------|------------------|
+ *                 drm_gpuva1       drm_gpuva2        drm_gpuva3
+ *
+ *            a     start        b              c       end          d
+ *       new: |-------|==========|--------------|========|---------|
+ *             remap1    map1       drm_gpuva2     map2     remap2
+ *
+ *    two REMAPS and two MAPS
+ *
+ * 3) Either start or end lies within a drm_gpuva. A single remap and a single
+ *    map operation are generated to update the affected portion.
+ *
+ *    ::
+ *
+ *            a/start            b              c       end          d
+ *       pre: |------------------|--------------|------------------|
+ *                 drm_gpuva1       drm_gpuva2        drm_gpuva3
+ *
+ *            a/start            b              c       end          d
+ *       new: |------------------|--------------|========|---------|
+ *                 drm_gpuva1       drm_gpuva2     map1     remap1
+ *
+ *    ::
+ *
+ *            a     start        b              c/end               d
+ *       pre: |------------------|--------------|------------------|
+ *                 drm_gpuva1       drm_gpuva2        drm_gpuva3
+ *
+ *            a     start        b              c/end               d
+ *       new: |-------|==========|--------------|------------------|
+ *             remap1    map1       drm_gpuva2        drm_gpuva3
+ *
+ *    one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No operations
+ *    are needed as the range is already covered.
+ *
+ * 5) No existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create(), this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and update.
+ */
+
+/**
  * DOC: Locking
  *
  * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
 *
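The splitting described above is what drm_gpuvm_madvise_ops_create(), added further down in this patch, exposes to drivers. A minimal sketch of how a driver might consume the resulting operation list; struct driver_va_attrs, driver_remap_gpuva() and driver_map_gpuva() are hypothetical driver-side helpers, not drm_gpuvm API:

#include <drm/drm_gpuvm.h>

/* Illustrative only: apply new attributes to a GPU VA sub-range. */
static int driver_madvise_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
                                struct driver_va_attrs *attrs)
{
        struct drm_gpuvm_map_req req = {
                .map.va.addr = addr,
                .map.va.range = range,
                /* No GEM object: madvise only splits existing mappings. */
        };
        struct drm_gpuva_ops *ops;
        struct drm_gpuva_op *op;
        int ret = 0;

        ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        drm_gpuva_for_each_op(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_REMAP:
                        /* Split the existing drm_gpuva at a start/end boundary. */
                        ret = driver_remap_gpuva(gpuvm, &op->remap);
                        break;
                case DRM_GPUVA_OP_MAP:
                        /* New drm_gpuva covering the advised sub-range. */
                        ret = driver_map_gpuva(gpuvm, &op->map, attrs);
                        break;
                default:
                        /* The madvise logic never emits unmap operations. */
                        break;
                }
                if (ret)
                        break;
        }

        drm_gpuva_ops_free(gpuvm, ops);
        return ret;
}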
@@ -485,13 +551,18 @@
  *              u64 addr, u64 range,
  *              struct drm_gem_object *obj, u64 offset)
  *      {
+ *              struct drm_gpuvm_map_req map_req = {
+ *                      .map.va.addr = addr,
+ *                      .map.va.range = range,
+ *                      .map.gem.obj = obj,
+ *                      .map.gem.offset = offset,
+ *              };
  *              struct drm_gpuva_ops *ops;
  *              struct drm_gpuva_op *op
  *              struct drm_gpuvm_bo *vm_bo;
  *
  *              driver_lock_va_space();
- *              ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- *                                                obj, offset);
+ *              ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
  *              if (IS_ERR(ops))
  *                      return PTR_ERR(ops);
  *
@@ -2053,16 +2124,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
 
 static int
 op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
-          u64 addr, u64 range,
-          struct drm_gem_object *obj, u64 offset)
+          const struct drm_gpuvm_map_req *req)
 {
        struct drm_gpuva_op op = {};
 
+       if (!req)
+               return 0;
+
        op.op = DRM_GPUVA_OP_MAP;
-       op.map.va.addr = addr;
-       op.map.va.range = range;
-       op.map.gem.obj = obj;
-       op.map.gem.offset = offset;
+       op.map.va.addr = req->map.va.addr;
+       op.map.va.range = req->map.va.range;
+       op.map.gem.obj = req->map.gem.obj;
+       op.map.gem.offset = req->map.gem.offset;
 
        return fn->sm_step_map(&op, priv);
 }
@@ -2087,10 +2160,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
 
 static int
 op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
-           struct drm_gpuva *va, bool merge)
+           struct drm_gpuva *va, bool merge, bool madvise)
 {
        struct drm_gpuva_op op = {};
 
+       if (madvise)
+               return 0;
+
        op.op = DRM_GPUVA_OP_UNMAP;
        op.unmap.va = va;
        op.unmap.keep = merge;
@@ -2101,10 +2177,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
 
 static int
 __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                    const struct drm_gpuvm_ops *ops, void *priv,
-                   u64 req_addr, u64 req_range,
-                   struct drm_gem_object *req_obj, u64 req_offset)
+                   const struct drm_gpuvm_map_req *req,
+                   bool madvise)
 {
+       struct drm_gem_object *req_obj = req->map.gem.obj;
+       const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
        struct drm_gpuva *va, *next;
+       u64 req_offset = req->map.gem.offset;
+       u64 req_range = req->map.va.range;
+       u64 req_addr = req->map.va.addr;
        u64 req_end = req_addr + req_range;
        int ret;
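With the map arguments now carried in struct drm_gpuvm_map_req, a driver-side call into the immediate split/merge state machine reduces to filling the struct and passing it on. A short sketch, where driver_vm_bind_map() and the priv cookie are assumptions rather than drm_gpuvm API:

#include <drm/drm_gpuvm.h>

static int driver_vm_bind_map(struct drm_gpuvm *gpuvm, void *priv,
                              u64 addr, u64 range,
                              struct drm_gem_object *obj, u64 offset)
{
        struct drm_gpuvm_map_req req = {
                .map.va.addr = addr,
                .map.va.range = range,
                .map.gem.obj = obj,
                .map.gem.offset = offset,
        };

        /* Drives the driver's sm_step_map/remap/unmap callbacks with priv. */
        return drm_gpuvm_sm_map(gpuvm, priv, &req);
}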
@@ -2119,19 +2200,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                u64 end = addr + range;
                bool merge = !!va->gem.obj;
 
+               if (madvise && obj)
+                       continue;
+
                if (addr == req_addr) {
                        merge &= obj == req_obj &&
                                 offset == req_offset;
 
                        if (end == req_end) {
-                               ret = op_unmap_cb(ops, priv, va, merge);
+                               ret = op_unmap_cb(ops, priv, va, merge, madvise);
                                if (ret)
                                        return ret;
                                break;
                        }
 
                        if (end < req_end) {
-                               ret = op_unmap_cb(ops, priv, va, merge);
+                               ret = op_unmap_cb(ops, priv, va, merge, madvise);
                                if (ret)
                                        return ret;
                                continue;
@@ -2152,6 +2236,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                ret = op_remap_cb(ops, priv, NULL, &n, &u);
                                if (ret)
                                        return ret;
+
+                               if (madvise)
+                                       op_map = req;
                                break;
                        }
                } else if (addr < req_addr) {
@@ -2172,6 +2259,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                ret = op_remap_cb(ops, priv, &p, NULL, &u);
                                if (ret)
                                        return ret;
+
+                               if (madvise)
+                                       op_map = req;
                                break;
                        }
 
@@ -2179,6 +2269,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                ret = op_remap_cb(ops, priv, &p, NULL, &u);
                                if (ret)
                                        return ret;
+
+                               if (madvise) {
+                                       struct drm_gpuvm_map_req map_req = {
+                                               .map.va.addr = req_addr,
+                                               .map.va.range = end - req_addr,
+                                       };
+
+                                       ret = op_map_cb(ops, priv, &map_req);
+                                       if (ret)
+                                               return ret;
+                               }
+
                                continue;
                        }
@@ -2194,6 +2296,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                ret = op_remap_cb(ops, priv, &p, &n, &u);
                                if (ret)
                                        return ret;
+
+                               if (madvise)
+                                       op_map = req;
                                break;
                        }
                } else if (addr > req_addr) {
@@ -2202,16 +2307,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                   (addr - req_addr);
 
                        if (end == req_end) {
-                               ret = op_unmap_cb(ops, priv, va, merge);
+                               ret = op_unmap_cb(ops, priv, va, merge, madvise);
                                if (ret)
                                        return ret;
+
                                break;
                        }
 
                        if (end < req_end) {
-                               ret = op_unmap_cb(ops, priv, va, merge);
+                               ret = op_unmap_cb(ops, priv, va, merge, madvise);
                                if (ret)
                                        return ret;
+
                                continue;
                        }
 
@@ -2230,14 +2337,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                ret = op_remap_cb(ops, priv, NULL, &n, &u);
                                if (ret)
                                        return ret;
+
+                               if (madvise) {
+                                       struct drm_gpuvm_map_req map_req = {
+                                               .map.va.addr = addr,
+                                               .map.va.range = req_end - addr,
+                                       };
+
+                                       return op_map_cb(ops, priv, &map_req);
+                               }
                                break;
                        }
                }
        }
-
-       return op_map_cb(ops, priv,
-                        req_addr, req_range,
-                        req_obj, req_offset);
+       return op_map_cb(ops, priv, op_map);
 }
 
 static int
@@ -2289,7 +2402,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
                        if (ret)
                                return ret;
                } else {
-                       ret = op_unmap_cb(ops, priv, va, false);
+                       ret = op_unmap_cb(ops, priv, va, false, false);
                        if (ret)
                                return ret;
                }
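__drm_gpuvm_sm_map() only generates the split/merge steps; it is the driver's &drm_gpuvm_ops callbacks that apply them to page tables and VA-space bookkeeping. A rough sketch of such callbacks, where struct driver_vm_ctx and the driver_*_mapping() helpers are hypothetical:

static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
        struct driver_vm_ctx *ctx = priv;       /* hypothetical driver context */

        /* Program page tables for the new mapping described by op->map. */
        return driver_install_mapping(ctx, &op->map);
}

static int driver_sm_step_remap(struct drm_gpuva_op *op, void *priv)
{
        struct driver_vm_ctx *ctx = priv;

        /* op->remap.prev / op->remap.next describe the surviving pieces. */
        return driver_split_mapping(ctx, &op->remap);
}

static int driver_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
{
        struct driver_vm_ctx *ctx = priv;

        /* Tear down the page tables backing op->unmap.va. */
        return driver_remove_mapping(ctx, &op->unmap);
}

static const struct drm_gpuvm_ops driver_gpuvm_ops = {
        .sm_step_map = driver_sm_step_map,
        .sm_step_remap = driver_sm_step_remap,
        .sm_step_unmap = driver_sm_step_unmap,
};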
@@ -2299,13 +2412,10 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
 }
 
 /**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
  * @priv: pointer to a driver private data structure
+ * @req: ptr to struct drm_gpuvm_map_req
  *
  * This function iterates the given range of the GPU VA space. It utilizes the
  * &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2332,8 +2442,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
  */
 int
 drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
-                u64 req_addr, u64 req_range,
-                struct drm_gem_object *req_obj, u64 req_offset)
+                const struct drm_gpuvm_map_req *req)
 {
        const struct drm_gpuvm_ops *ops = gpuvm->ops;
 
@@ -2342,14 +2451,12 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
                       ops->sm_step_unmap)))
                return -EINVAL;
 
-       return __drm_gpuvm_sm_map(gpuvm, ops, priv,
-                                 req_addr, req_range,
-                                 req_obj, req_offset);
+       return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
 
 /**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
  * @priv: pointer to a driver private data structure
  * @req_addr: the start address of the range to unmap
 *
@@ -2390,6 +2497,126 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
 
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+       struct drm_exec *exec = priv;
+
+       switch (op->op) {
+       case DRM_GPUVA_OP_REMAP:
+               if (op->remap.unmap->va->gem.obj)
+                       return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+               return 0;
+       case DRM_GPUVA_OP_UNMAP:
+               if (op->unmap.va->gem.obj)
+                       return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+               return 0;
+       default:
+               return 0;
+       }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+       .sm_step_map = drm_gpuva_sm_step_lock,
+       .sm_step_remap = drm_gpuva_sm_step_lock,
+       .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by a drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: for newly mapped objects, the # of fences to reserve
+ * @req: ptr to drm_gpuvm_map_req struct
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is:
+ *
+ *     vm_bind {
+ *             struct drm_exec exec;
+ *
+ *             // IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended:
+ *             drm_exec_init(&exec, IGNORE_DUPLICATES | INTERRUPTIBLE_WAIT, 0);
+ *
+ *             drm_exec_until_all_locked (&exec) {
+ *                     for_each_vm_bind_operation {
+ *                             switch (op->op) {
+ *                             case DRIVER_OP_UNMAP:
+ *                                     ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ *                                     break;
+ *                             case DRIVER_OP_MAP:
+ *                                     ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
+ *                                     break;
+ *                             }
+ *
+ *                             drm_exec_retry_on_contention(&exec);
+ *                             if (ret)
+ *                                     return ret;
+ *                     }
 *             }
+ *     }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ *    DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ *    locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ *    op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ *    required without the earlier DRIVER_OP_MAP. This is safe because we've
+ *    already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+                          struct drm_exec *exec, unsigned int num_fences,
+                          struct drm_gpuvm_map_req *req)
+{
+       struct drm_gem_object *req_obj = req->map.gem.obj;
+
+       if (req_obj) {
+               int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+               if (ret)
+                       return ret;
+       }
+
+       return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
+
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+                            u64 req_addr, u64 req_range)
+{
+       return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+                                   req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
+
 static struct drm_gpuva_op *
 gpuva_op_alloc(struct drm_gpuvm *gpuvm)
 {
@@ -2481,13 +2708,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
        .sm_step_unmap = drm_gpuva_sm_step,
 };
 
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+                             const struct drm_gpuvm_map_req *req,
+                             bool madvise)
+{
+       struct drm_gpuva_ops *ops;
+       struct {
+               struct drm_gpuvm *vm;
+               struct drm_gpuva_ops *ops;
+       } args;
+       int ret;
+
+       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+       if (unlikely(!ops))
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&ops->list);
+
+       args.vm = gpuvm;
+       args.ops = ops;
+
+       ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+       if (ret)
+               goto err_free_ops;
+
+       return ops;
+
+err_free_ops:
+       drm_gpuva_ops_free(gpuvm, ops);
+       return ERR_PTR(ret);
+}
+
 /**
  * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
 *
  * This function creates a list of operations to perform splitting and merging
  * of existent mapping(s) with the newly requested one.
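The kerneldoc for drm_gpuvm_sm_map_exec_lock() above gives the expected drm_exec usage in pseudo-code; a slightly more concrete, still hypothetical, version of that locking loop follows. struct driver_vm_bind_job, struct driver_bind_op and their fields are assumptions; only the drm_exec and drm_gpuvm calls are real API:

#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>

static int driver_vm_bind_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
                               struct driver_vm_bind_job *job,
                               unsigned int num_fences)
{
        struct driver_bind_op *bop;     /* hypothetical per-ioctl bind op */
        int ret = 0;

        /* IGNORE_DUPLICATES is required, INTERRUPTIBLE_WAIT is recommended. */
        drm_exec_init(exec, DRM_EXEC_IGNORE_DUPLICATES |
                            DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

        drm_exec_until_all_locked(exec) {
                list_for_each_entry(bop, &job->ops, head) {
                        if (bop->unmap)
                                ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, exec,
                                                                   bop->addr,
                                                                   bop->range);
                        else
                                ret = drm_gpuvm_sm_map_exec_lock(gpuvm, exec,
                                                                 num_fences,
                                                                 &bop->map_req);

                        drm_exec_retry_on_contention(exec);
                        if (ret)
                                goto err_fini;
                }
        }

        return 0;

err_fini:
        drm_exec_fini(exec);
        return ret;
}

On success the caller goes on to modify the VM and calls drm_exec_fini() itself once the locks are no longer needed.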
@@ -2515,40 +2771,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
  */
 struct drm_gpuva_ops *
 drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
-                           u64 req_addr, u64 req_range,
-                           struct drm_gem_object *req_obj, u64 req_offset)
+                           const struct drm_gpuvm_map_req *req)
 {
-       struct drm_gpuva_ops *ops;
-       struct {
-               struct drm_gpuvm *vm;
-               struct drm_gpuva_ops *ops;
-       } args;
-       int ret;
-
-       ops = kzalloc(sizeof(*ops), GFP_KERNEL);
-       if (unlikely(!ops))
-               return ERR_PTR(-ENOMEM);
-
-       INIT_LIST_HEAD(&ops->list);
-
-       args.vm = gpuvm;
-       args.ops = ops;
-
-       ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
-                                req_addr, req_range,
-                                req_obj, req_offset);
-       if (ret)
-               goto err_free_ops;
-
-       return ops;
-
-err_free_ops:
-       drm_gpuva_ops_free(gpuvm, ops);
-       return ERR_PTR(ret);
+       return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
 
 /**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations to perform splitting
+ * of existent mapping(s) at start or end, based on the request map.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it
+ * also can be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, a maximum of two remap operations and two
+ * map operations. The two map operations correspond to: one from start to the
+ * end of drm_gpuvaX, and another from the start of drm_gpuvaY to end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+                            const struct drm_gpuvm_map_req *req)
+{
+       return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
+
+/**
  * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
  * unmap
  * @gpuvm: the &drm_gpuvm representing the GPU VA space
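Given the contract documented for drm_gpuvm_madvise_ops_create() (no unmaps, at most two remaps and two maps), a driver could sanity-check a returned list before applying it. An illustrative sketch only, not part of drm_gpuvm:

static bool driver_madvise_ops_valid(struct drm_gpuva_ops *ops)
{
        struct drm_gpuva_op *op;
        unsigned int nr_maps = 0, nr_remaps = 0;

        drm_gpuva_for_each_op(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_MAP:
                        nr_maps++;
                        break;
                case DRM_GPUVA_OP_REMAP:
                        nr_remaps++;
                        break;
                default:
                        /* drm_gpuvm_madvise_ops_create() never emits unmaps. */
                        return false;
                }
        }

        return nr_maps <= 2 && nr_remaps <= 2;
}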
