author		Thomas Hellstrom <thellstrom@vmware.com>	2019-09-12 20:38:54 +0200
committer	Christian König <christian.koenig@amd.com>	2019-10-14 12:49:24 +0200
commit		941f2f72dbbe0cf8c2d6e0b180a8021a0ec477fa (patch)
tree		356379d44566c0483b55c9f85e46b9530645e5d9
parent		73a88e4ce31055c415f1ddb55e3f08c9393cf4e3 (diff)
drm/ttm: Restore ttm prefaulting
Commit 4daa4fba3a38 ("gpu: drm: ttm: Adding new return type vm_fault_t")
broke TTM prefaulting. Since vmf_insert_mixed() typically always returns
VM_FAULT_NOPAGE, prefaulting stops after the second PTE.

Restore (almost) the original behaviour. Unfortunately, with the new
vm_fault_t return type we can no longer determine whether a prefaulting
PTE insertion hit an already populated PTE, so we cannot terminate the
insertion loop early. Instead we continue with the pre-determined number
of prefaults.

Fixes: 4daa4fba3a38 ("gpu: drm: ttm: Adding new return type vm_fault_t")
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/330387/
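To illustrate the failure mode described above: vmf_insert_mixed() and
vmf_insert_pfn() report success as VM_FAULT_NOPAGE, so the old test
"ret == VM_FAULT_NOPAGE && i > 0" fires on the very first prefault
iteration. The following standalone userspace sketch (not kernel code;
the constant values mirror include/linux/mm_types.h and ttm_bo_vm.c,
and vmf_insert_pfn_stub() is a hypothetical stand-in) shows the old
check stopping the loop after only two insertions:

#include <stdio.h>

#define VM_FAULT_NOPAGE		0x000100	/* mirrors include/linux/mm_types.h */
#define VM_FAULT_ERROR		0x000800	/* stand-in bit; the real mask ORs several errors */
#define TTM_BO_VM_NUM_PREFAULT	16		/* as in ttm_bo_vm.c */

/* Hypothetical stub: a successful PTE insertion reports VM_FAULT_NOPAGE. */
static unsigned int vmf_insert_pfn_stub(void)
{
	return VM_FAULT_NOPAGE;
}

int main(void)
{
	unsigned int ret;
	int i, inserted = 0;

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		ret = vmf_insert_pfn_stub();
		++inserted;

		/* Old (broken) check: true for every successful prefault,
		 * because success itself is reported as VM_FAULT_NOPAGE. */
		if (ret == VM_FAULT_NOPAGE && i > 0)
			break;
		else if (ret & VM_FAULT_ERROR)
			return 1;
	}
	printf("old check: %d of %d PTEs inserted\n",
	       inserted, TTM_BO_VM_NUM_PREFAULT);	/* prints: 2 of 16 */
	return 0;
}

Compiled and run, this prints "old check: 2 of 16 PTEs inserted"; with
the patched, error-only termination check, all 16 iterations complete.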
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 76eedb963693..46dc3de7e81b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		else
 			ret = vmf_insert_pfn(&cvma, address, pfn);
 
-		/*
-		 * Somebody beat us to this PTE or prefaulting to
-		 * an already populated PTE, or prefaulting error.
-		 */
-
-		if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-			break;
-		else if (unlikely(ret & VM_FAULT_ERROR))
-			goto out_io_unlock;
+		/* Never error on prefaulted PTEs */
+		if (unlikely((ret & VM_FAULT_ERROR))) {
+			if (i == 0)
+				goto out_io_unlock;
+			else
+				break;
+		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))