From fa3dd623e559e8e7004179f9594b090318df0d05 Mon Sep 17 00:00:00 2001
From: Min He
Date: Fri, 2 Mar 2018 10:00:25 +0800
Subject: drm/i915/gvt: keep oa config in shadow ctx

When populating the shadow ctx from the guest, we should handle the oa
related registers in the hw ctx, so that they will not be overwritten by
guest oa configs. This patch makes it possible to capture oa data from the
host for both the host and guests.

Signed-off-by: Min He
Signed-off-by: Zhi Wang
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 50 ++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)
(limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..8caf72c1e794 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * When populating the shadow ctx from the guest, we should not override the
+ * oa related registers, so that they will not be overwritten by guest oa
+ * configs. This makes it possible to capture oa data from the host for both
+ * the host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }
--
cgit v1.2.3
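The save/restore pattern in sr_oa_regs() above can be tried out in
isolation. Below is a minimal userspace sketch of the same idea: the
logical ring context is modeled as a flat u32 array of (mmio offset, value)
pairs, matching how reg_state is indexed above. Everything prefixed
demo_/DEMO_ is a made-up stand-in, not an i915 definition.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_OACTXCTRL  4   /* hypothetical slot of the (offset, value) pair */
    #define DEMO_FLEX_COUNT 3   /* pretend we track three flex EU registers */

    struct demo_workload {
    	uint32_t oactxctrl;
    	uint32_t flex_mmio[DEMO_FLEX_COUNT];
    };

    /* save == true:  stash the host-owned values before guest state lands on top.
     * save == false: write the stashed host values back over the guest's.
     */
    static void demo_sr_oa_regs(struct demo_workload *w, uint32_t *reg_state,
    			    bool save)
    {
    	uint32_t flex0 = DEMO_OACTXCTRL + 2;	/* first flex (offset, value) pair */
    	int i;

    	if (save) {
    		w->oactxctrl = reg_state[DEMO_OACTXCTRL + 1];
    		for (i = 0; i < DEMO_FLEX_COUNT; i++)
    			w->flex_mmio[i] = reg_state[flex0 + i * 2 + 1];
    	} else {
    		reg_state[DEMO_OACTXCTRL + 1] = w->oactxctrl;
    		for (i = 0; i < DEMO_FLEX_COUNT; i++)
    			reg_state[flex0 + i * 2 + 1] = w->flex_mmio[i];
    	}
    }

    int main(void)
    {
    	uint32_t reg_state[16] = { 0 };
    	struct demo_workload w = { 0 };

    	reg_state[DEMO_OACTXCTRL + 1] = 0x2;	/* host OA config */
    	demo_sr_oa_regs(&w, reg_state, true);	/* save, before the guest copy */
    	reg_state[DEMO_OACTXCTRL + 1] = 0x7;	/* guest copy clobbers the slot */
    	demo_sr_oa_regs(&w, reg_state, false);	/* restore, after the copy */
    	printf("oactxctrl after restore: 0x%x\n",
    	       reg_state[DEMO_OACTXCTRL + 1]);	/* prints 0x2 */
    	return 0;
    }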
From ef75c685869ea2059f85855a7dc00148a704c36c Mon Sep 17 00:00:00 2001
From: fred gao
Date: Thu, 15 Mar 2018 13:21:10 +0800
Subject: drm/i915/gvt: Correct the privilege shadow batch buffer address

Once the ring buffer is copied to ring_scan_buffer and scanned, the shadow
batch buffer start address is only updated into ring_scan_buffer, not into
the real ring buffer allocated through intel_ring_begin in the later
copy_workload_to_ring_buffer.

This patch only sets the correct shadow batch buffer address from the ring
buffer; it does not cover shadow_wa_ctx.

v2:
- refine some comments. (Zhenyu)
v3:
- fix typo in title. (Zhenyu)
v4:
- remove the unnecessary comments. (Zhenyu)
- add comments in bb_start_cmd_va update. (Zhenyu)

Fixes: 0a53bc07f044 ("drm/i915/gvt: Separate cmd scan from request allocation")
Cc: stable@vger.kernel.org # v4.15
Cc: Zhenyu Wang
Cc: Yulei Zhang
Signed-off-by: fred gao
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 11 +++++++++++
 1 file changed, 11 insertions(+)
(limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8caf72c1e794..fdf1c0bf0d55 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -426,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For the privilege batch buffer (not wa_ctx), bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not the real ring
+		 * address allocated in the later copy_workload_to_ring_buffer.
+		 * Please note that shadow_ring_buffer_va now points to the
+		 * real ring buffer va in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
--
cgit v1.2.3
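The bug the hunk above fixes is, at its core, a stale-pointer problem: an
address computed against the scratch scan buffer is later used to patch
commands that have already been copied into the real ring buffer. A minimal
standalone sketch of both the failure and the fix (buffer sizes, the offset,
and the 0xdead placeholder address are arbitrary, not i915 values):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	uint32_t scan_buf[8] = { 0 };	/* scratch copy used for cmd scanning */
    	uint32_t ring_buf[8] = { 0 };	/* "real" ring allocation, filled later */
    	size_t bb_offset = 2;		/* offset of the batch-buffer-start cmd */

    	/* pointer recorded while scanning; it points into scan_buf */
    	uint32_t *bb_start_cmd_va = &scan_buf[bb_offset];

    	/* commands are later copied into the real ring buffer... */
    	memcpy(ring_buf, scan_buf, sizeof(scan_buf));

    	/* ...so patching through the stale pointer only updates the scratch
    	 * copy; the buffer the hardware executes never sees the new address
    	 */
    	bb_start_cmd_va[1] = 0xdead;
    	printf("stale patch:   ring_buf[%zu] = 0x%x\n", bb_offset + 1,
    	       ring_buf[bb_offset + 1]);	/* still 0x0 */

    	/* the fix: rebase the pointer onto the real ring before patching */
    	bb_start_cmd_va = &ring_buf[bb_offset];
    	bb_start_cmd_va[1] = 0xdead;
    	printf("rebased patch: ring_buf[%zu] = 0x%x\n", bb_offset + 1,
    	       ring_buf[bb_offset + 1]);	/* now 0xdead */
    	return 0;
    }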
From 850555d1d31e45fc3e9a2982f81717387e8d5e1b Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Wed, 14 Feb 2018 11:35:01 +0800
Subject: drm/i915/gvt: fix user copy warning by whitelisting workload rb_tail field

This fixes the following warning:

[ 6730.476938] ------------[ cut here ]------------
[ 6730.476979] Bad or missing usercopy whitelist? Kernel memory exposure attempt detected from SLAB object 'gvt-g_vgpu_workload' (offset 120, size 4)!
[ 6730.477021] WARNING: CPU: 2 PID: 441 at mm/usercopy.c:81 usercopy_warn+0x7e/0xa0
[ 6730.477042] Modules linked in: tun(E) bridge(E) stp(E) llc(E) kvmgt(E) x86_pkg_temp_thermal(E) vfio_mdev(E) intel_powerclamp(E) mdev(E) coretemp(E) vfio_iommu_type1(E) vfio(E) kvm_intel(E) kvm(E) hid_generic(E) irqbypass(E) crct10dif_pclmul(E) crc32_pclmul(E) usbhid(E) i915(E) crc32c_intel(E) hid(E) ghash_clmulni_intel(E) pcbc(E) aesni_intel(E) aes_x86_64(E) crypto_simd(E) cryptd(E) glue_helper(E) intel_cstate(E) idma64(E) evdev(E) virt_dma(E) iTCO_wdt(E) intel_uncore(E) intel_rapl_perf(E) intel_lpss_pci(E) sg(E) shpchp(E) mei_me(E) pcspkr(E) iTCO_vendor_support(E) intel_lpss(E) intel_pch_thermal(E) prime_numbers(E) mei(E) mfd_core(E) video(E) acpi_pad(E) button(E) binfmt_misc(E) ip_tables(E) x_tables(E) autofs4(E) ext4(E) crc16(E) mbcache(E) jbd2(E) fscrypto(E) sd_mod(E) e1000e(E) xhci_pci(E) sdhci_pci(E)
[ 6730.477244] ptp(E) cqhci(E) xhci_hcd(E) pps_core(E) sdhci(E) mmc_core(E) i2c_i801(E) usbcore(E) thermal(E) fan(E)
[ 6730.477276] CPU: 2 PID: 441 Comm: gvt workload 0 Tainted: G E 4.16.0-rc1-gvt-staging-0213+ #127
[ 6730.477303] Hardware name: /NUC6i5SYB, BIOS SYSKLi35.86A.0039.2016.0316.1747 03/16/2016
[ 6730.477326] RIP: 0010:usercopy_warn+0x7e/0xa0
[ 6730.477340] RSP: 0018:ffffba6301223d18 EFLAGS: 00010286
[ 6730.477355] RAX: 0000000000000000 RBX: ffff8f41caae9838 RCX: 0000000000000006
[ 6730.477375] RDX: 0000000000000007 RSI: 0000000000000082 RDI: ffff8f41dad166f0
[ 6730.477395] RBP: 0000000000000004 R08: 0000000000000576 R09: 0000000000000000
[ 6730.477415] R10: ffffffffb1293fb2 R11: 00000000ffffffff R12: 0000000000000001
[ 6730.477447] R13: ffff8f41caae983c R14: ffff8f41caae9838 R15: 00007f183ca2b000
[ 6730.477467] FS: 0000000000000000(0000) GS:ffff8f41dad00000(0000) knlGS:0000000000000000
[ 6730.477489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 6730.477506] CR2: 0000559462817291 CR3: 000000028b46c006 CR4: 00000000003626e0
[ 6730.477526] Call Trace:
[ 6730.477537] __check_object_size+0x9c/0x1a0
[ 6730.477562] __kvm_write_guest_page+0x45/0x90 [kvm]
[ 6730.477585] kvm_write_guest+0x46/0x80 [kvm]
[ 6730.477599] kvmgt_rw_gpa+0x9b/0xf0 [kvmgt]
[ 6730.477642] workload_thread+0xa38/0x1040 [i915]
[ 6730.477659] ? do_wait_intr_irq+0xc0/0xc0
[ 6730.477673] ? finish_wait+0x80/0x80
[ 6730.477707] ? clean_workloads+0x120/0x120 [i915]
[ 6730.477722] kthread+0x111/0x130
[ 6730.477733] ? _kthread_create_worker_on_cpu+0x60/0x60
[ 6730.477750] ? exit_to_usermode_loop+0x6f/0xb0
[ 6730.477766] ret_from_fork+0x35/0x40
[ 6730.477777] Code: 48 c7 c0 20 e3 25 b1 48 0f 44 c2 41 50 51 41 51 48 89 f9 49 89 f1 4d 89 d8 4c 89 d2 48 89 c6 48 c7 c7 78 e3 25 b1 e8 b2 bc e4 ff <0f> ff 48 83 c4 18 c3 48 c7 c6 09 d0 26 b1 49 89 f1 49 89 f3 eb
[ 6730.477849] ---[ end trace cae869c1c323e45a ]---

Fix this by whitelisting the guest page write from the workload struct
allocated from the kmem cache.
Reviewed-by: Hang Yuan
Signed-off-by: Zhenyu Wang
(cherry picked from commit 5627705406874df57fdfad3b4e0c9aedd3b007df)
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
(limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index fdf1c0bf0d55..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1105,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;
--
cgit v1.2.3
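The API used in this fix belongs to the hardened usercopy machinery: with
CONFIG_HARDENED_USERCOPY enabled, any copy between a slab object and user
(or, via kvm_write_guest(), guest) memory must fall inside the whitelist
window declared when the cache was created. kmem_cache_create() declares an
empty whitelist, which is why the 4-byte rb_tail write at offset 120 tripped
the warning above. A minimal kernel-module sketch of declaring such a
whitelist follows; the demo_obj struct and cache name are hypothetical, only
the slab API calls come from the patch.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct demo_obj {
    	u64 private_state;	/* must never reach user/guest memory */
    	u32 exported_field;	/* the one field usercopy helpers may touch */
    };

    static struct kmem_cache *demo_cache;

    static int __init demo_init(void)
    {
    	/* Whitelist exactly the (offset, size) window that usercopy-style
    	 * helpers are allowed to read from or write to in this object.
    	 */
    	demo_cache = kmem_cache_create_usercopy("demo_obj",
    			sizeof(struct demo_obj), 0,
    			SLAB_HWCACHE_ALIGN,
    			offsetof(struct demo_obj, exported_field),
    			sizeof_field(struct demo_obj, exported_field),
    			NULL);
    	return demo_cache ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
    	kmem_cache_destroy(demo_cache);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Copies that stray outside the declared window still hit usercopy_warn(), so
the whitelist should cover only what genuinely needs to cross the boundary,
as the rb_tail-only window in the patch does.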