mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-03-06 08:20:36 -05:00
Merge tag 'gvt-next-2016-10-14' of https://github.com/01org/gvt-linux into drm-intel-next-queued
Zhenyu Wang writes: This is first pull request to merge GVT-g device model in i915 which contains core GVT-g device model work to virtualize GPU resources. This tries to add feature of Intel GVT-g technology for full GPU virtualization. This version will support KVM based virtualization solution named as KVMGT. More background is on official project home: https://01.org/igvt-g Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
This commit is contained in:
@@ -1,5 +1,7 @@
|
||||
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
	execlist.o scheduler.o sched_policy.o render.o cmd_parser.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
||||
341
drivers/gpu/drm/i915/gvt/aperture_gm.c
Normal file
341
drivers/gpu/drm/i915/gvt/aperture_gm.c
Normal file
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Dexuan Cui
|
||||
*
|
||||
* Contributors:
|
||||
* Pei Zhang <pei.zhang@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Niu Bing <bing.niu@intel.com>
|
||||
* Yulei Zhang <yulei.zhang@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
|
||||
#define BYTES_TO_MB(b) ((b) >> 20ULL)
|
||||
|
||||
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
|
||||
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
|
||||
#define HOST_FENCE 4
|
||||
|
||||
/*
 * Allocate one chunk of graphics memory for a vGPU out of the host GGTT:
 * either the CPU-visible low ("aperture") range or the CPU-invisible high
 * ("hidden") range. On contention, evicts from the range and retries a
 * bounded number of times. Returns zero on success, negative errno on
 * failure.
 */
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct drm_mm_node *node;
	u32 search_flag, alloc_flag;
	u64 range_start, range_end, sz;
	int retries = 0;
	int ret;

	if (!high_gm) {
		/* Low GM: default bottom-up search in the aperture range. */
		search_flag = DRM_MM_SEARCH_DEFAULT;
		alloc_flag = DRM_MM_CREATE_DEFAULT;
		node = &vgpu->gm.low_gm_node;
		sz = vgpu_aperture_sz(vgpu);
		range_start = gvt_aperture_gmadr_base(gvt);
		range_end = gvt_aperture_gmadr_end(gvt);
	} else {
		/* High GM: search top-down within the hidden range. */
		search_flag = DRM_MM_SEARCH_BELOW;
		alloc_flag = DRM_MM_CREATE_TOP;
		node = &vgpu->gm.high_gm_node;
		sz = vgpu_hidden_sz(vgpu);
		range_start = gvt_hidden_gmadr_base(gvt);
		range_end = gvt_hidden_gmadr_end(gvt);
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
search_again:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
						  node, sz, 4096, 0,
						  range_start, range_end,
						  search_flag, alloc_flag);
	if (ret) {
		/* No free hole: evict something from the range and retry. */
		ret = i915_gem_evict_something(&dev_priv->ggtt.base,
					       sz, 4096, 0,
					       range_start, range_end, 0);
		if (ret == 0 && ++retries < 3)
			goto search_again;

		gvt_err("fail to alloc %s gm space from host, retried %d\n",
			high_gm ? "high" : "low", retries);
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return ret;
}
|
||||
|
||||
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
int ret;
|
||||
|
||||
ret = alloc_gm(vgpu, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = alloc_gm(vgpu, true);
|
||||
if (ret)
|
||||
goto out_free_aperture;
|
||||
|
||||
gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
|
||||
vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
|
||||
|
||||
gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
|
||||
vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
|
||||
|
||||
return 0;
|
||||
out_free_aperture:
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
drm_mm_remove_node(&vgpu->gm.low_gm_node);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void free_vgpu_gm(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
drm_mm_remove_node(&vgpu->gm.low_gm_node);
|
||||
drm_mm_remove_node(&vgpu->gm.high_gm_node);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_write_fence - write fence registers owned by a vGPU
|
||||
* @vgpu: vGPU instance
|
||||
* @fence: vGPU fence register number
|
||||
* @value: Fence register value to be written
|
||||
*
|
||||
* This function is used to write fence registers owned by a vGPU. The vGPU
|
||||
* fence register number will be translated into HW fence register number.
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
|
||||
u32 fence, u64 value)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
struct drm_i915_fence_reg *reg;
|
||||
i915_reg_t fence_reg_lo, fence_reg_hi;
|
||||
|
||||
if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
|
||||
return;
|
||||
|
||||
reg = vgpu->fence.regs[fence];
|
||||
if (WARN_ON(!reg))
|
||||
return;
|
||||
|
||||
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
|
||||
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
|
||||
|
||||
I915_WRITE(fence_reg_lo, 0);
|
||||
POSTING_READ(fence_reg_lo);
|
||||
|
||||
I915_WRITE(fence_reg_hi, upper_32_bits(value));
|
||||
I915_WRITE(fence_reg_lo, lower_32_bits(value));
|
||||
POSTING_READ(fence_reg_lo);
|
||||
}
|
||||
|
||||
static void free_vgpu_fence(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
struct drm_i915_fence_reg *reg;
|
||||
u32 i;
|
||||
|
||||
if (WARN_ON(!vgpu_fence_sz(vgpu)))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
|
||||
reg = vgpu->fence.regs[i];
|
||||
intel_vgpu_write_fence(vgpu, i, 0);
|
||||
list_add_tail(®->link,
|
||||
&dev_priv->mm.fence_list);
|
||||
}
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
struct drm_i915_fence_reg *reg;
|
||||
int i;
|
||||
struct list_head *pos, *q;
|
||||
|
||||
/* Request fences from host */
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
i = 0;
|
||||
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
|
||||
reg = list_entry(pos, struct drm_i915_fence_reg, link);
|
||||
if (reg->pin_count || reg->vma)
|
||||
continue;
|
||||
list_del(pos);
|
||||
vgpu->fence.regs[i] = reg;
|
||||
intel_vgpu_write_fence(vgpu, i, 0);
|
||||
if (++i == vgpu_fence_sz(vgpu))
|
||||
break;
|
||||
}
|
||||
if (i != vgpu_fence_sz(vgpu))
|
||||
goto out_free_fence;
|
||||
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
return 0;
|
||||
out_free_fence:
|
||||
/* Return fences to host, if fail */
|
||||
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
|
||||
reg = vgpu->fence.regs[i];
|
||||
if (!reg)
|
||||
continue;
|
||||
list_add_tail(®->link,
|
||||
&dev_priv->mm.fence_list);
|
||||
}
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static void free_resource(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
|
||||
gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
|
||||
gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
|
||||
gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
|
||||
}
|
||||
|
||||
static int alloc_resource(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_creation_params *param)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
unsigned long request, avail, max, taken;
|
||||
const char *item;
|
||||
|
||||
if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
|
||||
gvt_err("Invalid vGPU creation params\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
item = "low GM space";
|
||||
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
|
||||
taken = gvt->gm.vgpu_allocated_low_gm_size;
|
||||
avail = max - taken;
|
||||
request = MB_TO_BYTES(param->low_gm_sz);
|
||||
|
||||
if (request > avail)
|
||||
goto no_enough_resource;
|
||||
|
||||
vgpu_aperture_sz(vgpu) = request;
|
||||
|
||||
item = "high GM space";
|
||||
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
|
||||
taken = gvt->gm.vgpu_allocated_high_gm_size;
|
||||
avail = max - taken;
|
||||
request = MB_TO_BYTES(param->high_gm_sz);
|
||||
|
||||
if (request > avail)
|
||||
goto no_enough_resource;
|
||||
|
||||
vgpu_hidden_sz(vgpu) = request;
|
||||
|
||||
item = "fence";
|
||||
max = gvt_fence_sz(gvt) - HOST_FENCE;
|
||||
taken = gvt->fence.vgpu_allocated_fence_num;
|
||||
avail = max - taken;
|
||||
request = param->fence_sz;
|
||||
|
||||
if (request > avail)
|
||||
goto no_enough_resource;
|
||||
|
||||
vgpu_fence_sz(vgpu) = request;
|
||||
|
||||
gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
|
||||
gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
|
||||
gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
|
||||
return 0;
|
||||
|
||||
no_enough_resource:
|
||||
gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
|
||||
gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
|
||||
vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
|
||||
BYTES_TO_MB(max), BYTES_TO_MB(taken));
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
/**
 * intel_vgpu_free_resource - free HW resource owned by a vGPU
 * @vgpu: a vGPU
 *
 * This function is used to free the HW resource owned by a vGPU.
 *
 */
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
	/* Release in HW-then-bookkeeping order: GM nodes, fence registers,
	 * then the accounted quotas. */
	free_vgpu_gm(vgpu);
	free_vgpu_fence(vgpu);
	free_resource(vgpu);
}
|
||||
|
||||
/**
 * intel_vgpu_alloc_resource - allocate HW resource for a vGPU
 * @vgpu: vGPU
 * @param: vGPU creation params
 *
 * This function is used to allocate HW resource for a vGPU. User specifies
 * the resource configuration through the creation params.
 *
 * Returns:
 * zero on success, negative error code if failed.
 *
 */
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	int ret;

	/* Reserve quotas first; nothing to undo if this fails. */
	ret = alloc_resource(vgpu, param);
	if (ret)
		return ret;

	ret = alloc_vgpu_gm(vgpu);
	if (ret)
		goto out_free_resource;

	ret = alloc_vgpu_fence(vgpu);
	if (ret)
		goto out_free_vgpu_gm;

	return 0;

out_free_vgpu_gm:
	free_vgpu_gm(vgpu);
out_free_resource:
	free_resource(vgpu);
	return ret;
}
|
||||
287
drivers/gpu/drm/i915/gvt/cfg_space.c
Normal file
287
drivers/gpu/drm/i915/gvt/cfg_space.c
Normal file
@@ -0,0 +1,287 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
/* Virtual PCI BARs emulated for a vGPU. GTTMMIO is tracked via
 * PCI_BASE_ADDRESS_0 (see trap_gttmmio) and APERTURE via
 * PCI_BASE_ADDRESS_2 (see map_aperture). */
enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,	/* GTT + MMIO register BAR */
	INTEL_GVT_PCI_BAR_APERTURE,	/* graphics aperture BAR */
	INTEL_GVT_PCI_BAR_PIO,		/* legacy I/O port BAR */
	INTEL_GVT_PCI_BAR_MAX,
};
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_vgpu *vgpu = __vgpu;
|
||||
|
||||
if (WARN_ON(bytes > 4))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Map or unmap the vGPU's aperture through the hypervisor: translate the
 * guest frame range (BAR2 base + vGPU aperture offset) to the machine
 * frame range backing it. Idempotent: a no-op when already in the
 * requested state.
 */
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	u64 first_gfn, first_mfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	/* The 64-bit-type flag lives in the low byte of the BAR. */
	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  first_mfn,
						  vgpu_aperture_sz(vgpu)
						  >> PAGE_SHIFT, map,
						  GVT_MAP_APERTURE);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}
|
||||
|
||||
/*
 * Arm or disarm the hypervisor trap covering the vGPU's GTT/MMIO BAR
 * (BAR0) so guest accesses are forwarded to the device model. Idempotent:
 * a no-op when already in the requested state.
 */
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	/* The 64-bit-type flag lives in the low byte of the BAR. */
	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	/* Drop the BAR flag bits to get the bus address. */
	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}
|
||||
|
||||
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
|
||||
unsigned int offset, void *p_data, unsigned int bytes)
|
||||
{
|
||||
u8 old = vgpu_cfg_space(vgpu)[offset];
|
||||
u8 new = *(u8 *)p_data;
|
||||
u8 changed = old ^ new;
|
||||
int ret;
|
||||
|
||||
if (!(changed & PCI_COMMAND_MEMORY))
|
||||
return 0;
|
||||
|
||||
if (old & PCI_COMMAND_MEMORY) {
|
||||
ret = trap_gttmmio(vgpu, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = map_aperture(vgpu, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ret = trap_gttmmio(vgpu, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = map_aperture(vgpu, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
unsigned int bar_index =
|
||||
(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
|
||||
u32 new = *(u32 *)(p_data);
|
||||
bool lo = IS_ALIGNED(offset, 8);
|
||||
u64 size;
|
||||
int ret = 0;
|
||||
bool mmio_enabled =
|
||||
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
|
||||
|
||||
if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
|
||||
return -EINVAL;
|
||||
|
||||
if (new == 0xffffffff) {
|
||||
/*
|
||||
* Power-up software can determine how much address
|
||||
* space the device requires by writing a value of
|
||||
* all 1's to the register and then reading the value
|
||||
* back. The device will return 0's in all don't-care
|
||||
* address bits.
|
||||
*/
|
||||
size = vgpu->cfg_space.bar[bar_index].size;
|
||||
if (lo) {
|
||||
new = rounddown(new, size);
|
||||
} else {
|
||||
u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
|
||||
/* for 32bit mode bar it returns all-0 in upper 32
|
||||
* bit, for 64bit mode bar it will calculate the
|
||||
* size with lower 32bit and return the corresponding
|
||||
* value
|
||||
*/
|
||||
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
|
||||
new &= (~(size-1)) >> 32;
|
||||
else
|
||||
new = 0;
|
||||
}
|
||||
/*
|
||||
* Unmapp & untrap the BAR, since guest hasn't configured a
|
||||
* valid GPA
|
||||
*/
|
||||
switch (bar_index) {
|
||||
case INTEL_GVT_PCI_BAR_GTTMMIO:
|
||||
ret = trap_gttmmio(vgpu, false);
|
||||
break;
|
||||
case INTEL_GVT_PCI_BAR_APERTURE:
|
||||
ret = map_aperture(vgpu, false);
|
||||
break;
|
||||
}
|
||||
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
|
||||
} else {
|
||||
/*
|
||||
* Unmapp & untrap the old BAR first, since guest has
|
||||
* re-configured the BAR
|
||||
*/
|
||||
switch (bar_index) {
|
||||
case INTEL_GVT_PCI_BAR_GTTMMIO:
|
||||
ret = trap_gttmmio(vgpu, false);
|
||||
break;
|
||||
case INTEL_GVT_PCI_BAR_APERTURE:
|
||||
ret = map_aperture(vgpu, false);
|
||||
break;
|
||||
}
|
||||
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
|
||||
/* Track the new BAR */
|
||||
if (mmio_enabled) {
|
||||
switch (bar_index) {
|
||||
case INTEL_GVT_PCI_BAR_GTTMMIO:
|
||||
ret = trap_gttmmio(vgpu, true);
|
||||
break;
|
||||
case INTEL_GVT_PCI_BAR_APERTURE:
|
||||
ret = map_aperture(vgpu, true);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space write
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_vgpu *vgpu = __vgpu;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(bytes > 4))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
|
||||
return -EINVAL;
|
||||
|
||||
/* First check if it's PCI_COMMAND */
|
||||
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
|
||||
if (WARN_ON(bytes > 2))
|
||||
return -EINVAL;
|
||||
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
|
||||
}
|
||||
|
||||
switch (rounddown(offset, 4)) {
|
||||
case PCI_BASE_ADDRESS_0:
|
||||
case PCI_BASE_ADDRESS_1:
|
||||
case PCI_BASE_ADDRESS_2:
|
||||
case PCI_BASE_ADDRESS_3:
|
||||
if (WARN_ON(!IS_ALIGNED(offset, 4)))
|
||||
return -EINVAL;
|
||||
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
|
||||
|
||||
case INTEL_GVT_PCI_SWSCI:
|
||||
if (WARN_ON(!IS_ALIGNED(offset, 4)))
|
||||
return -EINVAL;
|
||||
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
|
||||
case INTEL_GVT_PCI_OPREGION:
|
||||
if (WARN_ON(!IS_ALIGNED(offset, 4)))
|
||||
return -EINVAL;
|
||||
ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
|
||||
break;
|
||||
default:
|
||||
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
2878
drivers/gpu/drm/i915/gvt/cmd_parser.c
Normal file
2878
drivers/gpu/drm/i915/gvt/cmd_parser.c
Normal file
File diff suppressed because it is too large
Load Diff
49
drivers/gpu/drm/i915/gvt/cmd_parser.h
Normal file
49
drivers/gpu/drm/i915/gvt/cmd_parser.h
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Yulei Zhang <yulei.zhang@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_

/* Bit width of the command-handler hash table. */
#define GVT_CMD_HASH_BITS 7

/* Tear down command parser state of a GVT device. */
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);

/* Set up command parser state of a GVT device; returns 0 or -errno. */
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);

/* Scan and shadow the command stream of a vGPU workload. */
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);

/* Scan and shadow a workaround (WA) context. */
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);

#endif
|
||||
@@ -24,11 +24,34 @@
|
||||
#ifndef __GVT_DEBUG_H__
#define __GVT_DEBUG_H__

/* Error reporting for GVT, routed through DRM_ERROR. */
#define gvt_err(fmt, args...) \
	DRM_ERROR("gvt: "fmt, ##args)

/* Per-subsystem debug messages, routed through DRM_DEBUG_DRIVER with a
 * "gvt: <subsystem>:" prefix so they can be filtered in the log. */
#define gvt_dbg_core(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)

#define gvt_dbg_irq(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)

#define gvt_dbg_mm(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)

#define gvt_dbg_mmio(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)

#define gvt_dbg_dpy(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)

#define gvt_dbg_el(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)

#define gvt_dbg_sched(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)

#define gvt_dbg_render(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)

#define gvt_dbg_cmd(fmt, args...) \
	DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)

#endif
|
||||
|
||||
329
drivers/gpu/drm/i915/gvt/display.c
Normal file
329
drivers/gpu/drm/i915/gvt/display.c
Normal file
@@ -0,0 +1,329 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Terrence Xu <terrence.xu@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static int get_edp_pipe(struct intel_vgpu *vgpu)
|
||||
{
|
||||
u32 data = vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP);
|
||||
int pipe = -1;
|
||||
|
||||
switch (data & TRANS_DDI_EDP_INPUT_MASK) {
|
||||
case TRANS_DDI_EDP_INPUT_A_ON:
|
||||
case TRANS_DDI_EDP_INPUT_A_ONOFF:
|
||||
pipe = PIPE_A;
|
||||
break;
|
||||
case TRANS_DDI_EDP_INPUT_B_ONOFF:
|
||||
pipe = PIPE_B;
|
||||
break;
|
||||
case TRANS_DDI_EDP_INPUT_C_ONOFF:
|
||||
pipe = PIPE_C;
|
||||
break;
|
||||
}
|
||||
return pipe;
|
||||
}
|
||||
|
||||
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
|
||||
return 0;
|
||||
|
||||
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
|
||||
return -EINVAL;
|
||||
|
||||
if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
|
||||
return 1;
|
||||
|
||||
if (edp_pipe_is_enabled(vgpu) &&
|
||||
get_edp_pipe(vgpu) == pipe)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Canned 128-byte EDID block advertised to the guest for the virtual DP
 * monitor; established timings limit the resolution to 1024x768. */
static unsigned char virtual_dp_monitor_edid[] = {
	/* Header */
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
	/* Vendor & Product Identification */
	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
	/* Version & Revision */
	0x01, 0x04,
	/* Basic Display Parameters & Features */
	0xa5, 0x34, 0x20, 0x78, 0x23,
	/* Color Characteristics */
	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
	/* Established Timings: maximum resolution is 1024x768 */
	0x21, 0x08, 0x00,
	/* Standard Timings. All invalid */
	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
	/* 18 Byte Data Blocks 1: invalid */
	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
	/* 18 Byte Data Blocks 2: invalid */
	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	/* 18 Byte Data Blocks 3: invalid */
	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
	/* 18 Byte Data Blocks 4: invalid */
	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
	/* Extension Block Count */
	0x00,
	/* Checksum */
	0xef,
};
|
||||
|
||||
#define DPCD_HEADER_SIZE 0xb
|
||||
|
||||
u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
|
||||
0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
};
|
||||
|
||||
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
|
||||
SDE_PORTC_HOTPLUG_CPT |
|
||||
SDE_PORTD_HOTPLUG_CPT);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv))
|
||||
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
|
||||
SDE_PORTE_HOTPLUG_SPT);
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
|
||||
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
|
||||
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
|
||||
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) &&
|
||||
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
|
||||
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
|
||||
}
|
||||
|
||||
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
|
||||
GEN8_PORT_DP_A_HOTPLUG;
|
||||
else
|
||||
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
|
||||
}
|
||||
}
|
||||
|
||||
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
|
||||
{
|
||||
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
|
||||
|
||||
kfree(port->edid);
|
||||
port->edid = NULL;
|
||||
|
||||
kfree(port->dpcd);
|
||||
port->dpcd = NULL;
|
||||
}
|
||||
|
||||
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
|
||||
int type)
|
||||
{
|
||||
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
|
||||
|
||||
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
|
||||
if (!port->edid)
|
||||
return -ENOMEM;
|
||||
|
||||
port->dpcd = kzalloc(sizeof(*(port->dpcd)), GFP_KERNEL);
|
||||
if (!port->dpcd) {
|
||||
kfree(port->edid);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
|
||||
EDID_SIZE);
|
||||
port->edid->data_valid = true;
|
||||
|
||||
memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE);
|
||||
port->dpcd->data_valid = true;
|
||||
port->dpcd->data[DPCD_SINK_COUNT] = 0x1;
|
||||
port->type = type;
|
||||
|
||||
emulate_monitor_status_change(vgpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_check_vblank_emulation - check if vblank emulation timer should
|
||||
* be turned on/off when a virtual pipe is enabled/disabled.
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is used to turn on/off vblank timer according to currently
|
||||
* enabled/disabled virtual pipes.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
struct intel_vgpu *vgpu;
|
||||
bool have_enabled_pipe = false;
|
||||
int pipe, id;
|
||||
|
||||
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
|
||||
return;
|
||||
|
||||
hrtimer_cancel(&irq->vblank_timer.timer);
|
||||
|
||||
for_each_active_vgpu(gvt, vgpu, id) {
|
||||
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
|
||||
have_enabled_pipe =
|
||||
pipe_is_enabled(vgpu, pipe);
|
||||
if (have_enabled_pipe)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (have_enabled_pipe)
|
||||
hrtimer_start(&irq->vblank_timer.timer,
|
||||
ktime_add_ns(ktime_get(), irq->vblank_timer.period),
|
||||
HRTIMER_MODE_ABS);
|
||||
}
|
||||
|
||||
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct intel_vgpu_irq *irq = &vgpu->irq;
|
||||
int vblank_event[] = {
|
||||
[PIPE_A] = PIPE_A_VBLANK,
|
||||
[PIPE_B] = PIPE_B_VBLANK,
|
||||
[PIPE_C] = PIPE_C_VBLANK,
|
||||
};
|
||||
int event;
|
||||
|
||||
if (pipe < PIPE_A || pipe > PIPE_C)
|
||||
return;
|
||||
|
||||
for_each_set_bit(event, irq->flip_done_event[pipe],
|
||||
INTEL_GVT_EVENT_MAX) {
|
||||
clear_bit(event, irq->flip_done_event[pipe]);
|
||||
if (!pipe_is_enabled(vgpu, pipe))
|
||||
continue;
|
||||
|
||||
vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
|
||||
intel_vgpu_trigger_virtual_event(vgpu, event);
|
||||
}
|
||||
|
||||
if (pipe_is_enabled(vgpu, pipe)) {
|
||||
vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
|
||||
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
|
||||
}
|
||||
}
|
||||
|
||||
static void emulate_vblank(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int pipe;
|
||||
|
||||
for_each_pipe(vgpu->gvt->dev_priv, pipe)
|
||||
emulate_vblank_on_pipe(vgpu, pipe);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_emulate_vblank - trigger vblank events for vGPUs on GVT device
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is used to trigger vblank interrupts for vGPUs on GVT device
|
||||
*
|
||||
*/
|
||||
void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_vgpu *vgpu;
|
||||
int id;
|
||||
|
||||
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
|
||||
return;
|
||||
|
||||
for_each_active_vgpu(gvt, vgpu, id)
|
||||
emulate_vblank(vgpu);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_clean_display - clean vGPU virtual display emulation
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
* This function is used to clean vGPU virtual display emulation stuffs
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv))
|
||||
clean_virtual_dp_monitor(vgpu, PORT_D);
|
||||
else
|
||||
clean_virtual_dp_monitor(vgpu, PORT_B);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_init_display- initialize vGPU virtual display emulation
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
* This function is used to initialize vGPU virtual display emulation stuffs
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_vgpu_init_display(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
intel_vgpu_init_i2c_edid(vgpu);
|
||||
|
||||
if (IS_SKYLAKE(dev_priv))
|
||||
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
|
||||
else
|
||||
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
|
||||
}
|
||||
163
drivers/gpu/drm/i915/gvt/display.h
Normal file
163
drivers/gpu/drm/i915/gvt/display.h
Normal file
@@ -0,0 +1,163 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Terrence Xu <terrence.xu@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_DISPLAY_H_
|
||||
#define _GVT_DISPLAY_H_
|
||||
|
||||
#define SBI_REG_MAX 20
|
||||
#define DPCD_SIZE 0x700
|
||||
|
||||
#define intel_vgpu_port(vgpu, port) \
|
||||
(&(vgpu->display.ports[port]))
|
||||
|
||||
#define intel_vgpu_has_monitor_on_port(vgpu, port) \
|
||||
(intel_vgpu_port(vgpu, port)->edid && \
|
||||
intel_vgpu_port(vgpu, port)->edid->data_valid)
|
||||
|
||||
#define intel_vgpu_port_is_dp(vgpu, port) \
|
||||
((intel_vgpu_port(vgpu, port)->type == GVT_DP_A) || \
|
||||
(intel_vgpu_port(vgpu, port)->type == GVT_DP_B) || \
|
||||
(intel_vgpu_port(vgpu, port)->type == GVT_DP_C) || \
|
||||
(intel_vgpu_port(vgpu, port)->type == GVT_DP_D))
|
||||
|
||||
#define INTEL_GVT_MAX_UEVENT_VARS 3
|
||||
|
||||
/* DPCD start */
/* Fix: the duplicate "#define DPCD_SIZE 0x700" that used to sit here has
 * been removed — DPCD_SIZE is already defined once near the top of this
 * header with the same value.
 */

/* DPCD */
#define DP_SET_POWER			0x600
#define DP_SET_POWER_D0			0x1
#define AUX_NATIVE_WRITE		0x8
#define AUX_NATIVE_READ			0x9

#define AUX_NATIVE_REPLY_MASK		(0x3 << 4)
#define AUX_NATIVE_REPLY_ACK		(0x0 << 4)
#define AUX_NATIVE_REPLY_NAK		(0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER		(0x2 << 4)

#define AUX_BURST_SIZE			16

/* DPCD addresses */
#define DPCD_REV			0x000
#define DPCD_MAX_LINK_RATE		0x001
#define DPCD_MAX_LANE_COUNT		0x002

#define DPCD_TRAINING_PATTERN_SET	0x102
#define DPCD_SINK_COUNT			0x200
#define DPCD_LANE0_1_STATUS		0x202
#define DPCD_LANE2_3_STATUS		0x203
#define DPCD_LANE_ALIGN_STATUS_UPDATED	0x204
#define DPCD_SINK_STATUS		0x205

/* link training */
#define DPCD_TRAINING_PATTERN_SET_MASK	0x03
#define DPCD_LINK_TRAINING_DISABLED	0x00
#define DPCD_TRAINING_PATTERN_1		0x01
#define DPCD_TRAINING_PATTERN_2		0x02

#define DPCD_CP_READY_MASK		(1 << 6)

/* lane status */
#define DPCD_LANES_CR_DONE		0x11
#define DPCD_LANES_EQ_DONE		0x22
#define DPCD_SYMBOL_LOCKED		0x44

#define DPCD_INTERLANE_ALIGN_DONE	0x01

#define DPCD_SINK_IN_SYNC		0x03
/* DPCD end */
|
||||
|
||||
#define SBI_RESPONSE_MASK 0x3
|
||||
#define SBI_RESPONSE_SHIFT 0x1
|
||||
#define SBI_STAT_MASK 0x1
|
||||
#define SBI_STAT_SHIFT 0x0
|
||||
#define SBI_OPCODE_SHIFT 8
|
||||
#define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT)
|
||||
#define SBI_CMD_IORD 2
|
||||
#define SBI_CMD_IOWR 3
|
||||
#define SBI_CMD_CRRD 6
|
||||
#define SBI_CMD_CRWR 7
|
||||
#define SBI_ADDR_OFFSET_SHIFT 16
|
||||
#define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT)
|
||||
|
||||
/* One cached sideband-interface (SBI) register: offset plus last value. */
struct intel_vgpu_sbi_register {
	unsigned int offset;
	u32 value;
};

/* Per-vGPU SBI shadow; only the first 'number' entries are valid. */
struct intel_vgpu_sbi {
	int number;
	struct intel_vgpu_sbi_register registers[SBI_REG_MAX];
};

/* Display plane kinds used by the virtual display emulation. */
enum intel_gvt_plane_type {
	PRIMARY_PLANE = 0,
	CURSOR_PLANE,
	SPRITE_PLANE,
	MAX_PLANE
};

/* Shadow DPCD block exposed to the guest; 'data' is meaningful only
 * while data_valid is set.
 */
struct intel_vgpu_dpcd_data {
	bool data_valid;
	u8 data[DPCD_SIZE];
};

/* Kind of monitor emulated on a virtual port. */
enum intel_vgpu_port_type {
	GVT_CRT = 0,
	GVT_DP_A,
	GVT_DP_B,
	GVT_DP_C,
	GVT_DP_D,
	GVT_HDMI_B,
	GVT_HDMI_C,
	GVT_HDMI_D,
	GVT_PORT_MAX
};

/* Per-port virtual display state; edid/dpcd are NULL when no monitor
 * is emulated on the port (see setup/clean_virtual_dp_monitor()).
 */
struct intel_vgpu_port {
	/* per display EDID information */
	struct intel_vgpu_edid_data *edid;
	/* per display DPCD information */
	struct intel_vgpu_dpcd_data *dpcd;
	/* an enum intel_vgpu_port_type value */
	int type;
};
|
||||
|
||||
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
|
||||
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
|
||||
|
||||
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
|
||||
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
|
||||
|
||||
#endif
|
||||
531
drivers/gpu/drm/i915/gvt/edid.c
Normal file
531
drivers/gpu/drm/i915/gvt/edid.c
Normal file
@@ -0,0 +1,531 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Terrence Xu <terrence.xu@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define GMBUS1_TOTAL_BYTES_SHIFT 16
|
||||
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
|
||||
#define gmbus1_total_byte_count(v) (((v) >> \
|
||||
GMBUS1_TOTAL_BYTES_SHIFT) & GMBUS1_TOTAL_BYTES_MASK)
|
||||
#define gmbus1_slave_addr(v) (((v) & 0xff) >> 1)
|
||||
#define gmbus1_slave_index(v) (((v) >> 8) & 0xff)
|
||||
#define gmbus1_bus_cycle(v) (((v) >> 25) & 0x7)
|
||||
|
||||
/* GMBUS0 bits definitions */
|
||||
#define _GMBUS_PIN_SEL_MASK (0x7)
|
||||
|
||||
/* Return the next byte of the emulated EDID block for the in-progress
 * i2c transaction, advancing the read cursor.  Returns 0 (and logs an
 * error) when the guest's access sequence is invalid.
 */
static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
	unsigned char chr = 0;

	/* A read is only legal after a START + slave-address sequence. */
	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
		gvt_err("Driver tries to read EDID without proper sequence!\n");
		return 0;
	}
	/* Refuse to run past the 128-byte EDID base block. */
	if (edid->current_edid_read >= EDID_SIZE) {
		gvt_err("edid_get_byte() exceeds the size of EDID!\n");
		return 0;
	}

	if (!edid->edid_available) {
		gvt_err("Reading EDID but EDID is not available!\n");
		return 0;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, edid->port)) {
		struct intel_vgpu_edid_data *edid_data =
			intel_vgpu_port(vgpu, edid->port)->edid;

		chr = edid_data->edid_block[edid->current_edid_read];
		edid->current_edid_read++;
	} else {
		gvt_err("No EDID available during the reading?\n");
	}
	return chr;
}
|
||||
|
||||
/* Map the GMBUS0 pin-pair select field to the i915 port it drives.
 * Returns -EINVAL for pin selects with no EDID-capable port.
 */
static inline int get_port_from_gmbus0(u32 gmbus0)
{
	switch (gmbus0 & _GMBUS_PIN_SEL_MASK) {
	case 2:
		return PORT_E;
	case 4:
		return PORT_C;
	case 5:
		return PORT_B;
	case 6:
		return PORT_D;
	default:
		return -EINVAL;
	}
}
|
||||
|
||||
/* Return the virtual GMBUS controller to idle: hardware "ready", with a
 * slave-timeout error (SATOER) flagged when no EDID source is attached.
 */
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
	vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
	if (!vgpu->display.i2c_edid.edid_available)
		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
|
||||
|
||||
/* GMBUS0 */
|
||||
/* GMBUS0 (pin-pair select) write: restart the virtual i2c/EDID state
 * machine for the newly selected port and update GMBUS2 status bits to
 * reflect whether an EDID source is reachable on that port.
 */
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
			unsigned int offset, void *p_data, unsigned int bytes)
{
	int port, pin_select;

	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);

	pin_select = vgpu_vreg(vgpu, offset) & _GMBUS_PIN_SEL_MASK;

	/* Any pin reselect aborts whatever transaction was in flight. */
	intel_vgpu_init_i2c_edid(vgpu);

	/* Pin select 0 means "none" — nothing further to set up. */
	if (pin_select == 0)
		return 0;

	port = get_port_from_gmbus0(pin_select);
	if (WARN_ON(port < 0))
		return 0;

	vgpu->display.i2c_edid.state = I2C_GMBUS;
	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;

	vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
	vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;

	/* GMBUS EDID is served only for non-DP monitors (DP uses the AUX
	 * channel path); otherwise report a slave timeout.
	 */
	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
			!intel_vgpu_port_is_dp(vgpu, port)) {
		vgpu->display.i2c_edid.port = port;
		vgpu->display.i2c_edid.edid_available = true;
		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
	} else
		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
	return 0;
}
|
||||
|
||||
/* GMBUS1 (command/status) write: the core of the virtual GMBUS cycle
 * state machine.  Decodes the requested bus cycle, selects the EDID
 * slave, and advances the emulated transfer phase accordingly.
 */
static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
	u32 slave_addr;
	u32 wvalue = *(u32 *)p_data;

	if (vgpu_vreg(vgpu, offset) & GMBUS_SW_CLR_INT) {
		/* SW_CLR_INT was set: de-asserting it resets the controller. */
		if (!(wvalue & GMBUS_SW_CLR_INT)) {
			vgpu_vreg(vgpu, offset) &= ~GMBUS_SW_CLR_INT;
			reset_gmbus_controller(vgpu);
		}
		/*
		 * TODO: "This bit is cleared to zero when an event
		 * causes the HW_RDY bit transition to occur "
		 */
	} else {
		/*
		 * per bspec setting this bit can cause:
		 * 1) INT status bit cleared
		 * 2) HW_RDY bit asserted
		 */
		if (wvalue & GMBUS_SW_CLR_INT) {
			vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
			vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
		}

		/* For virtualization, we suppose that HW is always ready,
		 * so GMBUS_SW_RDY should always be cleared
		 */
		if (wvalue & GMBUS_SW_RDY)
			wvalue &= ~GMBUS_SW_RDY;

		i2c_edid->gmbus.total_byte_count =
			gmbus1_total_byte_count(wvalue);
		slave_addr = gmbus1_slave_addr(wvalue);

		/* vgpu gmbus only support EDID */
		if (slave_addr == EDID_ADDR) {
			i2c_edid->slave_selected = true;
		} else if (slave_addr != 0) {
			gvt_dbg_dpy(
				"vgpu%d: unsupported gmbus slave addr(0x%x)\n"
				" gmbus operations will be ignored.\n",
				vgpu->id, slave_addr);
		}

		/* Indexed cycles carry the starting EDID offset in GMBUS1. */
		if (wvalue & GMBUS_CYCLE_INDEX)
			i2c_edid->current_edid_read =
				gmbus1_slave_index(wvalue);

		i2c_edid->gmbus.cycle_type = gmbus1_bus_cycle(wvalue);
		switch (gmbus1_bus_cycle(wvalue)) {
		case GMBUS_NOCYCLE:
			break;
		case GMBUS_STOP:
			/* From spec:
			 * This can only cause a STOP to be generated
			 * if a GMBUS cycle is generated, the GMBUS is
			 * currently in a data/wait/idle phase, or it is in a
			 * WAIT phase
			 */
			if (gmbus1_bus_cycle(vgpu_vreg(vgpu, offset))
				!= GMBUS_NOCYCLE) {
				intel_vgpu_init_i2c_edid(vgpu);
				/* After the 'stop' cycle, hw state would become
				 * 'stop phase' and then 'idle phase' after a
				 * few milliseconds. In emulation, we just set
				 * it as 'idle phase' ('stop phase' is not
				 * visible in gmbus interface)
				 */
				i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
				vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
			}
			break;
		case NIDX_NS_W:
		case IDX_NS_W:
		case NIDX_STOP:
		case IDX_STOP:
			/* From hw spec the GMBUS phase
			 * transition like this:
			 * START (-->INDEX) -->DATA
			 */
			i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
			vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
			break;
		default:
			gvt_err("Unknown/reserved GMBUS cycle detected!\n");
			break;
		}
		/*
		 * From hw spec the WAIT state will be
		 * cleared:
		 * (1) in a new GMBUS cycle
		 * (2) by generating a stop
		 */
		vgpu_vreg(vgpu, offset) = wvalue;
	}
	return 0;
}
|
||||
|
||||
/* GMBUS3 (data) write handler.  Only EDID *reads* are emulated over
 * GMBUS (see gmbus1_mmio_write), so a guest writing the data register
 * is unexpected — warn once and drop the write.
 */
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	WARN_ON(1);
	return 0;
}
|
||||
|
||||
static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
int i;
|
||||
unsigned char byte_data;
|
||||
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
|
||||
int byte_left = i2c_edid->gmbus.total_byte_count -
|
||||
i2c_edid->current_edid_read;
|
||||
int byte_count = byte_left;
|
||||
u32 reg_data = 0;
|
||||
|
||||
/* Data can only be recevied if previous settings correct */
|
||||
if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
|
||||
if (byte_left <= 0) {
|
||||
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (byte_count > 4)
|
||||
byte_count = 4;
|
||||
for (i = 0; i < byte_count; i++) {
|
||||
byte_data = edid_get_byte(vgpu);
|
||||
reg_data |= (byte_data << (i << 3));
|
||||
}
|
||||
|
||||
memcpy(&vgpu_vreg(vgpu, offset), ®_data, byte_count);
|
||||
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
|
||||
|
||||
if (byte_left <= 4) {
|
||||
switch (i2c_edid->gmbus.cycle_type) {
|
||||
case NIDX_STOP:
|
||||
case IDX_STOP:
|
||||
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
|
||||
break;
|
||||
case NIDX_NS_W:
|
||||
case IDX_NS_W:
|
||||
default:
|
||||
i2c_edid->gmbus.phase = GMBUS_WAIT_PHASE;
|
||||
break;
|
||||
}
|
||||
intel_vgpu_init_i2c_edid(vgpu);
|
||||
}
|
||||
/*
|
||||
* Read GMBUS3 during send operation,
|
||||
* return the latest written value
|
||||
*/
|
||||
} else {
|
||||
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
|
||||
gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
|
||||
vgpu->id);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmbus2_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
u32 value = vgpu_vreg(vgpu, offset);
|
||||
|
||||
if (!(vgpu_vreg(vgpu, offset) & GMBUS_INUSE))
|
||||
vgpu_vreg(vgpu, offset) |= GMBUS_INUSE;
|
||||
memcpy(p_data, (void *)&value, bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
u32 wvalue = *(u32 *)p_data;
|
||||
|
||||
if (wvalue & GMBUS_INUSE)
|
||||
vgpu_vreg(vgpu, offset) &= ~GMBUS_INUSE;
|
||||
/* All other bits are read-only */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read
 * @vgpu: a vGPU
 * @offset: mmio register offset being read
 * @p_data: destination buffer for the read value
 * @bytes: access width in bytes
 *
 * This function is used to emulate gmbus register mmio read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	/* NOTE(review): this rejects only accesses that are BOTH >8 bytes
	 * AND misaligned; a plain ">8 || misaligned" check may have been
	 * intended — confirm against the MMIO dispatch contract.
	 */
	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
		return -EINVAL;

	/* GMBUS2/GMBUS3 have side effects on read; everything else is a
	 * plain shadow-register read.
	 */
	if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
		return gmbus2_mmio_read(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
		return gmbus3_mmio_read(vgpu, offset, p_data, bytes);

	memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
	return 0;
}
|
||||
|
||||
/**
 * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write
 * @vgpu: a vGPU
 * @offset: mmio register offset being written
 * @p_data: source buffer holding the value written by the guest
 * @bytes: access width in bytes
 *
 * This function is used to emulate gmbus register mmio write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
		unsigned int offset, void *p_data, unsigned int bytes)
{
	/* NOTE(review): same condition as the read path — triggers only
	 * when the access is BOTH >8 bytes AND misaligned; verify whether
	 * "||" was intended.
	 */
	if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
		return -EINVAL;

	/* Dispatch GMBUS0-3 to their dedicated emulation handlers; other
	 * offsets fall through to a plain shadow-register update.
	 */
	if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
		return gmbus0_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS1))
		return gmbus1_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
		return gmbus2_mmio_write(vgpu, offset, p_data, bytes);
	else if (offset == i915_mmio_reg_offset(PCH_GMBUS3))
		return gmbus3_mmio_write(vgpu, offset, p_data, bytes);

	memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
	return 0;
}
|
||||
|
||||
/* Logical indices of the registers within one DP AUX channel block. */
enum {
	AUX_CH_CTL = 0,
	AUX_CH_DATA1,
	AUX_CH_DATA2,
	AUX_CH_DATA3,
	AUX_CH_DATA4,
	AUX_CH_DATA5
};

/* Map an AUX channel mmio offset to its logical register index.
 * The registers sit at low-byte offsets 0x10 (CTL) through 0x24
 * (DATA5) in steps of 4; anything else yields -1.
 */
static inline int get_aux_ch_reg(unsigned int offset)
{
	unsigned int low = offset & 0xff;

	if (low < 0x10 || low > 0x24 || (low & 3))
		return -1;

	return (low - 0x10) >> 2;
}
|
||||
|
||||
#define AUX_CTL_MSG_LENGTH(reg) \
|
||||
((reg & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> \
|
||||
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT)
|
||||
|
||||
/**
 * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write
 * @vgpu: a vGPU
 * @port_idx: port owning the AUX channel being written
 * @offset: mmio offset of the AUX channel register
 * @p_data: value written by the guest
 *
 * This function is used to emulate AUX channel register write, serving
 * EDID reads over I2C-over-AUX: a CTL write with SEND_BUSY starts a
 * transaction described by the message in the adjacent DATA register,
 * and the emulated reply (ACK plus optional EDID byte) is placed back
 * into DATA.
 */
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
				int port_idx,
				unsigned int offset,
				void *p_data)
{
	struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
	int msg_length, ret_msg_size;
	int msg, addr, ctrl, op;
	u32 value = *(u32 *)p_data;
	int aux_data_for_write = 0;
	int reg = get_aux_ch_reg(offset);

	/* DATA registers are plain pass-through shadows. */
	if (reg != AUX_CH_CTL) {
		vgpu_vreg(vgpu, offset) = value;
		return;
	}

	msg_length = AUX_CTL_MSG_LENGTH(value);
	/* check the msg in DATA register: byte layout is
	 * [31:24] ctrl (op in the high nibble), [23:8] i2c address.
	 */
	msg = vgpu_vreg(vgpu, offset + 4);
	addr = (msg >> 8) & 0xffff;
	ctrl = (msg >> 24) & 0xff;
	op = ctrl >> 4;
	if (!(value & DP_AUX_CH_CTL_SEND_BUSY)) {
		/* The ctl write to clear some states */
		return;
	}

	/* Always set the wanted value for vms. */
	ret_msg_size = (((op & 0x1) == GVT_AUX_I2C_READ) ? 2 : 1);
	vgpu_vreg(vgpu, offset) =
		DP_AUX_CH_CTL_DONE |
		((ret_msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) &
		DP_AUX_CH_CTL_MESSAGE_SIZE_MASK);

	/* A 3-byte message is an address-only transaction: START, RESTART
	 * or STOP depending on the MOT (middle-of-transaction) bit.
	 */
	if (msg_length == 3) {
		if (!(op & GVT_AUX_I2C_MOT)) {
			/* stop */
			intel_vgpu_init_i2c_edid(vgpu);
		} else {
			/* start or restart */
			i2c_edid->aux_ch.i2c_over_aux_ch = true;
			i2c_edid->aux_ch.aux_ch_mot = true;
			if (addr == 0) {
				/* reset the address */
				intel_vgpu_init_i2c_edid(vgpu);
			} else if (addr == EDID_ADDR) {
				i2c_edid->state = I2C_AUX_CH;
				i2c_edid->port = port_idx;
				i2c_edid->slave_selected = true;
				if (intel_vgpu_has_monitor_on_port(vgpu,
					port_idx) &&
					intel_vgpu_port_is_dp(vgpu, port_idx))
					i2c_edid->edid_available = true;
			}
		}
	} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
		/* TODO
		 * We only support EDID reading from I2C_over_AUX. And
		 * we do not expect the index mode to be used. Right now
		 * the WRITE operation is ignored. It is good enough to
		 * support the gfx driver to do EDID access.
		 */
	} else {
		if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
			return;
		if (WARN_ON(msg_length != 4))
			return;
		if (i2c_edid->edid_available && i2c_edid->slave_selected) {
			unsigned char val = edid_get_byte(vgpu);

			aux_data_for_write = (val << 16);
		}
	}
	/* write the return value in AUX_CH_DATA reg which includes:
	 * ACK of I2C_WRITE
	 * returned byte if it is READ
	 */

	aux_data_for_write |= (GVT_AUX_I2C_REPLY_ACK & 0xff) << 24;
	vgpu_vreg(vgpu, offset + 4) = aux_data_for_write;
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_init_i2c_edid - initialize vGPU i2c edid emulation
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
* This function is used to initialize vGPU i2c edid emulation stuffs
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_vgpu_i2c_edid *edid = &vgpu->display.i2c_edid;
|
||||
|
||||
edid->state = I2C_NOT_SPECIFIED;
|
||||
|
||||
edid->port = -1;
|
||||
edid->slave_selected = false;
|
||||
edid->edid_available = false;
|
||||
edid->current_edid_read = 0;
|
||||
|
||||
memset(&edid->gmbus, 0, sizeof(struct intel_vgpu_i2c_gmbus));
|
||||
|
||||
edid->aux_ch.i2c_over_aux_ch = false;
|
||||
edid->aux_ch.aux_ch_mot = false;
|
||||
}
|
||||
150
drivers/gpu/drm/i915/gvt/edid.h
Normal file
150
drivers/gpu/drm/i915/gvt/edid.h
Normal file
@@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Terrence Xu <terrence.xu@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_EDID_H_
|
||||
#define _GVT_EDID_H_
|
||||
|
||||
#define EDID_SIZE 128
|
||||
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
|
||||
|
||||
#define GVT_AUX_NATIVE_WRITE 0x8
|
||||
#define GVT_AUX_NATIVE_READ 0x9
|
||||
#define GVT_AUX_I2C_WRITE 0x0
|
||||
#define GVT_AUX_I2C_READ 0x1
|
||||
#define GVT_AUX_I2C_STATUS 0x2
|
||||
#define GVT_AUX_I2C_MOT 0x4
|
||||
#define GVT_AUX_I2C_REPLY_ACK (0x0 << 6)
|
||||
|
||||
/* One 128-byte EDID base block shadowed for a virtual monitor; only
 * meaningful while data_valid is set.
 */
struct intel_vgpu_edid_data {
	bool data_valid;
	unsigned char edid_block[EDID_SIZE];
};

/* GMBUS1 bus-cycle encodings (the 3-bit cycle field): no-op, data
 * cycles with/without an index phase, and stop variants.
 */
enum gmbus_cycle_type {
	GMBUS_NOCYCLE = 0x0,
	NIDX_NS_W = 0x1,
	IDX_NS_W = 0x3,
	GMBUS_STOP = 0x4,
	NIDX_STOP = 0x5,
	IDX_STOP = 0x7
};
|
||||
|
||||
/*
|
||||
* States of GMBUS
|
||||
*
|
||||
* GMBUS0-3 could be related to the EDID virtualization. Another two GMBUS
|
||||
* registers, GMBUS4 (interrupt mask) and GMBUS5 (2 byte indes register), are
|
||||
* not considered here. Below describes the usage of GMBUS registers that are
|
||||
* cared by the EDID virtualization
|
||||
*
|
||||
* GMBUS0:
|
||||
* R/W
|
||||
* port selection. value of bit0 - bit2 corresponds to the GPIO registers.
|
||||
*
|
||||
* GMBUS1:
|
||||
* R/W Protect
|
||||
* Command and Status.
|
||||
* bit0 is the direction bit: 1 is read; 0 is write.
|
||||
* bit1 - bit7 is slave 7-bit address.
|
||||
* bit16 - bit24 total byte count (ignore?)
|
||||
*
|
||||
* GMBUS2:
|
||||
* Most of bits are read only except bit 15 (IN_USE)
|
||||
* Status register
|
||||
* bit0 - bit8 current byte count
|
||||
* bit 11: hardware ready;
|
||||
*
|
||||
* GMBUS3:
|
||||
* Read/Write
|
||||
* Data for transfer
|
||||
*/
|
||||
|
||||
/* From hw specs, Other phases like START, ADDRESS, INDEX
|
||||
* are invisible to GMBUS MMIO interface. So no definitions
|
||||
* in below enum types
|
||||
*/
|
||||
/* Emulated transfer phase of the GMBUS controller. */
enum gvt_gmbus_phase {
	GMBUS_IDLE_PHASE = 0,
	GMBUS_DATA_PHASE,
	GMBUS_WAIT_PHASE,
	//GMBUS_STOP_PHASE,
	GMBUS_MAX_PHASE
};

/* Per-transaction GMBUS state derived from guest GMBUS1 writes. */
struct intel_vgpu_i2c_gmbus {
	unsigned int total_byte_count; /* from GMBUS1 */
	enum gmbus_cycle_type cycle_type;
	enum gvt_gmbus_phase phase;
};

/* I2C-over-AUX transaction flags (DP EDID path). */
struct intel_vgpu_i2c_aux_ch {
	bool i2c_over_aux_ch;
	bool aux_ch_mot; /* middle-of-transaction bit from the last msg */
};

/* Which virtual i2c transport is currently carrying the EDID read. */
enum i2c_state {
	I2C_NOT_SPECIFIED = 0,
	I2C_GMBUS = 1,
	I2C_AUX_CH = 2
};

/* I2C sequences cannot interleave.
 * GMBUS and AUX_CH sequences cannot interleave.
 */
struct intel_vgpu_i2c_edid {
	enum i2c_state state;

	/* port being read; -1 (wrapped) when no transaction is active */
	unsigned int port;
	bool slave_selected;
	bool edid_available;
	/* read cursor into the 128-byte EDID block */
	unsigned int current_edid_read;

	struct intel_vgpu_i2c_gmbus gmbus;
	struct intel_vgpu_i2c_aux_ch aux_ch;
};
|
||||
|
||||
/* Reset the vGPU's EDID/I2C emulation state machine. */
void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);

/* Emulate a guest read of a GMBUS register; returns 0 on success. */
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes);

/* Emulate a guest write to a GMBUS register; returns 0 on success. */
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes);

/* Emulate a guest write to an AUX channel data register. */
void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
	int port_idx,
	unsigned int offset,
	void *p_data);

#endif /*_GVT_EDID_H_*/
|
||||
852
drivers/gpu/drm/i915/gvt/execlist.c
Normal file
852
drivers/gpu/drm/i915/gvt/execlist.c
Normal file
@@ -0,0 +1,852 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define _EL_OFFSET_STATUS 0x234
|
||||
#define _EL_OFFSET_STATUS_BUF 0x370
|
||||
#define _EL_OFFSET_STATUS_PTR 0x3A0
|
||||
|
||||
#define execlist_ring_mmio(gvt, ring_id, offset) \
|
||||
(gvt->dev_priv->engine[ring_id].mmio_base + (offset))
|
||||
|
||||
#define valid_context(ctx) ((ctx)->valid)
|
||||
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
|
||||
((a)->lrca == (b)->lrca))
|
||||
|
||||
static int context_switch_events[] = {
|
||||
[RCS] = RCS_AS_CONTEXT_SWITCH,
|
||||
[BCS] = BCS_AS_CONTEXT_SWITCH,
|
||||
[VCS] = VCS_AS_CONTEXT_SWITCH,
|
||||
[VCS2] = VCS2_AS_CONTEXT_SWITCH,
|
||||
[VECS] = VECS_AS_CONTEXT_SWITCH,
|
||||
};
|
||||
|
||||
static int ring_id_to_context_switch_event(int ring_id)
|
||||
{
|
||||
if (WARN_ON(ring_id < RCS && ring_id >
|
||||
ARRAY_SIZE(context_switch_events)))
|
||||
return -EINVAL;
|
||||
|
||||
return context_switch_events[ring_id];
|
||||
}
|
||||
|
||||
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
|
||||
{
|
||||
gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
|
||||
execlist->running_slot ?
|
||||
execlist->running_slot->index : -1,
|
||||
execlist->running_context ?
|
||||
execlist->running_context->context_id : 0,
|
||||
execlist->pending_slot ?
|
||||
execlist->pending_slot->index : -1);
|
||||
|
||||
execlist->running_slot = execlist->pending_slot;
|
||||
execlist->pending_slot = NULL;
|
||||
execlist->running_context = execlist->running_context ?
|
||||
&execlist->running_slot->ctx[0] : NULL;
|
||||
|
||||
gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
|
||||
execlist->running_slot ?
|
||||
execlist->running_slot->index : -1,
|
||||
execlist->running_context ?
|
||||
execlist->running_context->context_id : 0,
|
||||
execlist->pending_slot ?
|
||||
execlist->pending_slot->index : -1);
|
||||
}
|
||||
|
||||
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
|
||||
{
|
||||
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
|
||||
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
|
||||
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
|
||||
struct intel_vgpu *vgpu = execlist->vgpu;
|
||||
struct execlist_status_format status;
|
||||
int ring_id = execlist->ring_id;
|
||||
u32 status_reg = execlist_ring_mmio(vgpu->gvt,
|
||||
ring_id, _EL_OFFSET_STATUS);
|
||||
|
||||
status.ldw = vgpu_vreg(vgpu, status_reg);
|
||||
status.udw = vgpu_vreg(vgpu, status_reg + 4);
|
||||
|
||||
if (running) {
|
||||
status.current_execlist_pointer = !!running->index;
|
||||
status.execlist_write_pointer = !!!running->index;
|
||||
status.execlist_0_active = status.execlist_0_valid =
|
||||
!!!(running->index);
|
||||
status.execlist_1_active = status.execlist_1_valid =
|
||||
!!(running->index);
|
||||
} else {
|
||||
status.context_id = 0;
|
||||
status.execlist_0_active = status.execlist_0_valid = 0;
|
||||
status.execlist_1_active = status.execlist_1_valid = 0;
|
||||
}
|
||||
|
||||
status.context_id = desc ? desc->context_id : 0;
|
||||
status.execlist_queue_full = !!(pending);
|
||||
|
||||
vgpu_vreg(vgpu, status_reg) = status.ldw;
|
||||
vgpu_vreg(vgpu, status_reg + 4) = status.udw;
|
||||
|
||||
gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
|
||||
vgpu->id, status_reg, status.ldw, status.udw);
|
||||
}
|
||||
|
||||
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
|
||||
struct execlist_context_status_format *status,
|
||||
bool trigger_interrupt_later)
|
||||
{
|
||||
struct intel_vgpu *vgpu = execlist->vgpu;
|
||||
int ring_id = execlist->ring_id;
|
||||
struct execlist_context_status_pointer_format ctx_status_ptr;
|
||||
u32 write_pointer;
|
||||
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
|
||||
|
||||
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
|
||||
_EL_OFFSET_STATUS_PTR);
|
||||
ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
|
||||
_EL_OFFSET_STATUS_BUF);
|
||||
|
||||
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
|
||||
|
||||
write_pointer = ctx_status_ptr.write_ptr;
|
||||
|
||||
if (write_pointer == 0x7)
|
||||
write_pointer = 0;
|
||||
else {
|
||||
++write_pointer;
|
||||
write_pointer %= 0x6;
|
||||
}
|
||||
|
||||
offset = ctx_status_buf_reg + write_pointer * 8;
|
||||
|
||||
vgpu_vreg(vgpu, offset) = status->ldw;
|
||||
vgpu_vreg(vgpu, offset + 4) = status->udw;
|
||||
|
||||
ctx_status_ptr.write_ptr = write_pointer;
|
||||
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
|
||||
|
||||
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
|
||||
vgpu->id, write_pointer, offset, status->ldw, status->udw);
|
||||
|
||||
if (trigger_interrupt_later)
|
||||
return;
|
||||
|
||||
intel_vgpu_trigger_virtual_event(vgpu,
|
||||
ring_id_to_context_switch_event(execlist->ring_id));
|
||||
}
|
||||
|
||||
static int emulate_execlist_ctx_schedule_out(
|
||||
struct intel_vgpu_execlist *execlist,
|
||||
struct execlist_ctx_descriptor_format *ctx)
|
||||
{
|
||||
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
|
||||
struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
|
||||
struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
|
||||
struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
|
||||
struct execlist_context_status_format status;
|
||||
|
||||
memset(&status, 0, sizeof(status));
|
||||
|
||||
gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
|
||||
|
||||
if (WARN_ON(!same_context(ctx, execlist->running_context))) {
|
||||
gvt_err("schedule out context is not running context,"
|
||||
"ctx id %x running ctx id %x\n",
|
||||
ctx->context_id,
|
||||
execlist->running_context->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
|
||||
if (valid_context(ctx1) && same_context(ctx0, ctx)) {
|
||||
gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");
|
||||
|
||||
execlist->running_context = ctx1;
|
||||
|
||||
emulate_execlist_status(execlist);
|
||||
|
||||
status.context_complete = status.element_switch = 1;
|
||||
status.context_id = ctx->context_id;
|
||||
|
||||
emulate_csb_update(execlist, &status, false);
|
||||
/*
|
||||
* ctx1 is not valid, ctx == ctx0
|
||||
* ctx1 is valid, ctx1 == ctx
|
||||
* --> last element is finished
|
||||
* emulate:
|
||||
* active-to-idle if there is *no* pending execlist
|
||||
* context-complete if there *is* pending execlist
|
||||
*/
|
||||
} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
|
||||
|| (valid_context(ctx1) && same_context(ctx1, ctx))) {
|
||||
gvt_dbg_el("need to switch virtual execlist slot\n");
|
||||
|
||||
switch_virtual_execlist_slot(execlist);
|
||||
|
||||
emulate_execlist_status(execlist);
|
||||
|
||||
status.context_complete = status.active_to_idle = 1;
|
||||
status.context_id = ctx->context_id;
|
||||
|
||||
if (!pending) {
|
||||
emulate_csb_update(execlist, &status, false);
|
||||
} else {
|
||||
emulate_csb_update(execlist, &status, true);
|
||||
|
||||
memset(&status, 0, sizeof(status));
|
||||
|
||||
status.idle_to_active = 1;
|
||||
status.context_id = 0;
|
||||
|
||||
emulate_csb_update(execlist, &status, false);
|
||||
}
|
||||
} else {
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
|
||||
struct intel_vgpu_execlist *execlist)
|
||||
{
|
||||
struct intel_vgpu *vgpu = execlist->vgpu;
|
||||
int ring_id = execlist->ring_id;
|
||||
u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
|
||||
_EL_OFFSET_STATUS);
|
||||
struct execlist_status_format status;
|
||||
|
||||
status.ldw = vgpu_vreg(vgpu, status_reg);
|
||||
status.udw = vgpu_vreg(vgpu, status_reg + 4);
|
||||
|
||||
if (status.execlist_queue_full) {
|
||||
gvt_err("virtual execlist slots are full\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &execlist->slot[status.execlist_write_pointer];
|
||||
}
|
||||
|
||||
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
|
||||
struct execlist_ctx_descriptor_format ctx[2])
|
||||
{
|
||||
struct intel_vgpu_execlist_slot *running = execlist->running_slot;
|
||||
struct intel_vgpu_execlist_slot *slot =
|
||||
get_next_execlist_slot(execlist);
|
||||
|
||||
struct execlist_ctx_descriptor_format *ctx0, *ctx1;
|
||||
struct execlist_context_status_format status;
|
||||
|
||||
gvt_dbg_el("emulate schedule-in\n");
|
||||
|
||||
if (!slot) {
|
||||
gvt_err("no available execlist slot\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&status, 0, sizeof(status));
|
||||
memset(slot->ctx, 0, sizeof(slot->ctx));
|
||||
|
||||
slot->ctx[0] = ctx[0];
|
||||
slot->ctx[1] = ctx[1];
|
||||
|
||||
gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
|
||||
slot->index, ctx[0].context_id,
|
||||
ctx[1].context_id);
|
||||
|
||||
/*
|
||||
* no running execlist, make this write bundle as running execlist
|
||||
* -> idle-to-active
|
||||
*/
|
||||
if (!running) {
|
||||
gvt_dbg_el("no current running execlist\n");
|
||||
|
||||
execlist->running_slot = slot;
|
||||
execlist->pending_slot = NULL;
|
||||
execlist->running_context = &slot->ctx[0];
|
||||
|
||||
gvt_dbg_el("running slot index %d running context %x\n",
|
||||
execlist->running_slot->index,
|
||||
execlist->running_context->context_id);
|
||||
|
||||
emulate_execlist_status(execlist);
|
||||
|
||||
status.idle_to_active = 1;
|
||||
status.context_id = 0;
|
||||
|
||||
emulate_csb_update(execlist, &status, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ctx0 = &running->ctx[0];
|
||||
ctx1 = &running->ctx[1];
|
||||
|
||||
gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
|
||||
running->index, ctx0->context_id, ctx1->context_id);
|
||||
|
||||
/*
|
||||
* already has an running execlist
|
||||
* a. running ctx1 is valid,
|
||||
* ctx0 is finished, and running ctx1 == new execlist ctx[0]
|
||||
* b. running ctx1 is not valid,
|
||||
* ctx0 == new execlist ctx[0]
|
||||
* ----> lite-restore + preempted
|
||||
*/
|
||||
if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
|
||||
/* condition a */
|
||||
(!same_context(ctx0, execlist->running_context))) ||
|
||||
(!valid_context(ctx1) &&
|
||||
same_context(ctx0, &slot->ctx[0]))) { /* condition b */
|
||||
gvt_dbg_el("need to switch virtual execlist slot\n");
|
||||
|
||||
execlist->pending_slot = slot;
|
||||
switch_virtual_execlist_slot(execlist);
|
||||
|
||||
emulate_execlist_status(execlist);
|
||||
|
||||
status.lite_restore = status.preempted = 1;
|
||||
status.context_id = ctx[0].context_id;
|
||||
|
||||
emulate_csb_update(execlist, &status, false);
|
||||
} else {
|
||||
gvt_dbg_el("emulate as pending slot\n");
|
||||
/*
|
||||
* otherwise
|
||||
* --> emulate pending execlist exist + but no preemption case
|
||||
*/
|
||||
execlist->pending_slot = slot;
|
||||
emulate_execlist_status(execlist);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void free_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
intel_vgpu_unpin_mm(workload->shadow_mm);
|
||||
intel_gvt_mm_unreference(workload->shadow_mm);
|
||||
kmem_cache_free(workload->vgpu->workloads, workload);
|
||||
}
|
||||
|
||||
#define get_desc_from_elsp_dwords(ed, i) \
|
||||
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
|
||||
|
||||
|
||||
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
|
||||
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
|
||||
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
|
||||
unsigned long add, int gmadr_bytes)
|
||||
{
|
||||
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
|
||||
return -1;
|
||||
|
||||
*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
|
||||
BATCH_BUFFER_ADDR_MASK;
|
||||
if (gmadr_bytes == 8) {
|
||||
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
|
||||
add & BATCH_BUFFER_ADDR_HIGH_MASK;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
|
||||
struct i915_vma *vma;
|
||||
unsigned long gma;
|
||||
|
||||
/* pin the gem object to ggtt */
|
||||
if (!list_empty(&workload->shadow_bb)) {
|
||||
struct intel_shadow_bb_entry *entry_obj =
|
||||
list_first_entry(&workload->shadow_bb,
|
||||
struct intel_shadow_bb_entry,
|
||||
list);
|
||||
struct intel_shadow_bb_entry *temp;
|
||||
|
||||
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
|
||||
list) {
|
||||
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
|
||||
0, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
gvt_err("Cannot pin\n");
|
||||
return;
|
||||
}
|
||||
i915_gem_object_unpin_pages(entry_obj->obj);
|
||||
|
||||
/* update the relocate gma with shadow batch buffer*/
|
||||
gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
|
||||
WARN_ON(!IS_ALIGNED(gma, 4));
|
||||
set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
{
|
||||
int ring_id = wa_ctx->workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx =
|
||||
wa_ctx->workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_gem_object *ctx_obj =
|
||||
shadow_ctx->engine[ring_id].state->obj;
|
||||
struct execlist_ring_context *shadow_ring_context;
|
||||
struct page *page;
|
||||
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
|
||||
shadow_ring_context = kmap_atomic(page);
|
||||
|
||||
shadow_ring_context->bb_per_ctx_ptr.val =
|
||||
(shadow_ring_context->bb_per_ctx_ptr.val &
|
||||
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
|
||||
shadow_ring_context->rcs_indirect_ctx.val =
|
||||
(shadow_ring_context->rcs_indirect_ctx.val &
|
||||
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
|
||||
|
||||
kunmap_atomic(shadow_ring_context);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
{
|
||||
struct i915_vma *vma;
|
||||
unsigned long gma;
|
||||
unsigned char *per_ctx_va =
|
||||
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
|
||||
wa_ctx->indirect_ctx.size;
|
||||
|
||||
if (wa_ctx->indirect_ctx.size == 0)
|
||||
return;
|
||||
|
||||
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
|
||||
if (IS_ERR(vma)) {
|
||||
gvt_err("Cannot pin indirect ctx obj\n");
|
||||
return;
|
||||
}
|
||||
i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
|
||||
|
||||
gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
|
||||
WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
|
||||
wa_ctx->indirect_ctx.shadow_gma = gma;
|
||||
|
||||
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
|
||||
memset(per_ctx_va, 0, CACHELINE_BYTES);
|
||||
|
||||
update_wa_ctx_2_shadow_ctx(wa_ctx);
|
||||
}
|
||||
|
||||
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct execlist_ctx_descriptor_format ctx[2];
|
||||
int ring_id = workload->ring_id;
|
||||
|
||||
intel_vgpu_pin_mm(workload->shadow_mm);
|
||||
intel_vgpu_sync_oos_pages(workload->vgpu);
|
||||
intel_vgpu_flush_post_shadow(workload->vgpu);
|
||||
prepare_shadow_batch_buffer(workload);
|
||||
prepare_shadow_wa_ctx(&workload->wa_ctx);
|
||||
if (!workload->emulate_schedule_in)
|
||||
return 0;
|
||||
|
||||
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
|
||||
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
|
||||
|
||||
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
|
||||
}
|
||||
|
||||
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
/* release all the shadow batch buffer */
|
||||
if (!list_empty(&workload->shadow_bb)) {
|
||||
struct intel_shadow_bb_entry *entry_obj =
|
||||
list_first_entry(&workload->shadow_bb,
|
||||
struct intel_shadow_bb_entry,
|
||||
list);
|
||||
struct intel_shadow_bb_entry *temp;
|
||||
|
||||
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
|
||||
list) {
|
||||
drm_gem_object_unreference(&(entry_obj->obj->base));
|
||||
kvfree(entry_obj->va);
|
||||
list_del(&entry_obj->list);
|
||||
kfree(entry_obj);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
{
|
||||
if (wa_ctx->indirect_ctx.size == 0)
|
||||
return;
|
||||
|
||||
drm_gem_object_unreference(&(wa_ctx->indirect_ctx.obj->base));
|
||||
kvfree(wa_ctx->indirect_ctx.shadow_va);
|
||||
}
|
||||
|
||||
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_vgpu_execlist *execlist =
|
||||
&vgpu->execlist[workload->ring_id];
|
||||
struct intel_vgpu_workload *next_workload;
|
||||
struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
|
||||
bool lite_restore = false;
|
||||
int ret;
|
||||
|
||||
gvt_dbg_el("complete workload %p status %d\n", workload,
|
||||
workload->status);
|
||||
|
||||
release_shadow_batch_buffer(workload);
|
||||
release_shadow_wa_ctx(&workload->wa_ctx);
|
||||
|
||||
if (workload->status || vgpu->resetting)
|
||||
goto out;
|
||||
|
||||
if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
|
||||
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
|
||||
|
||||
next_workload = container_of(next,
|
||||
struct intel_vgpu_workload, list);
|
||||
this_desc = &workload->ctx_desc;
|
||||
next_desc = &next_workload->ctx_desc;
|
||||
|
||||
lite_restore = same_context(this_desc, next_desc);
|
||||
}
|
||||
|
||||
if (lite_restore) {
|
||||
gvt_dbg_el("next context == current - no schedule-out\n");
|
||||
free_workload(workload);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
|
||||
if (ret)
|
||||
goto err;
|
||||
out:
|
||||
free_workload(workload);
|
||||
return 0;
|
||||
err:
|
||||
free_workload(workload);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define RING_CTX_OFF(x) \
|
||||
offsetof(struct execlist_ring_context, x)
|
||||
|
||||
static void read_guest_pdps(struct intel_vgpu *vgpu,
|
||||
u64 ring_context_gpa, u32 pdp[8])
|
||||
{
|
||||
u64 gpa;
|
||||
int i;
|
||||
|
||||
gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
intel_gvt_hypervisor_read_gpa(vgpu,
|
||||
gpa + i * 8, &pdp[7 - i], 4);
|
||||
}
|
||||
|
||||
static int prepare_mm(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
|
||||
struct intel_vgpu_mm *mm;
|
||||
int page_table_level;
|
||||
u32 pdp[8];
|
||||
|
||||
if (desc->addressing_mode == 1) { /* legacy 32-bit */
|
||||
page_table_level = 3;
|
||||
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
|
||||
page_table_level = 4;
|
||||
} else {
|
||||
gvt_err("Advanced Context mode(SVM) is not supported!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
|
||||
|
||||
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
|
||||
if (mm) {
|
||||
intel_gvt_mm_reference(mm);
|
||||
} else {
|
||||
|
||||
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
|
||||
pdp, page_table_level, 0);
|
||||
if (IS_ERR(mm)) {
|
||||
gvt_err("fail to create mm object.\n");
|
||||
return PTR_ERR(mm);
|
||||
}
|
||||
}
|
||||
workload->shadow_mm = mm;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define get_last_workload(q) \
|
||||
(list_empty(q) ? NULL : container_of(q->prev, \
|
||||
struct intel_vgpu_workload, list))
|
||||
|
||||
bool submit_context(struct intel_vgpu *vgpu, int ring_id,
|
||||
struct execlist_ctx_descriptor_format *desc,
|
||||
bool emulate_schedule_in)
|
||||
{
|
||||
struct list_head *q = workload_q_head(vgpu, ring_id);
|
||||
struct intel_vgpu_workload *last_workload = get_last_workload(q);
|
||||
struct intel_vgpu_workload *workload = NULL;
|
||||
u64 ring_context_gpa;
|
||||
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
|
||||
int ret;
|
||||
|
||||
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
|
||||
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
|
||||
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(ring_header.val), &head, 4);
|
||||
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(ring_tail.val), &tail, 4);
|
||||
|
||||
head &= RB_HEAD_OFF_MASK;
|
||||
tail &= RB_TAIL_OFF_MASK;
|
||||
|
||||
if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
|
||||
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
|
||||
gvt_dbg_el("ctx head %x real head %lx\n", head,
|
||||
last_workload->rb_tail);
|
||||
/*
|
||||
* cannot use guest context head pointer here,
|
||||
* as it might not be updated at this time
|
||||
*/
|
||||
head = last_workload->rb_tail;
|
||||
}
|
||||
|
||||
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
|
||||
|
||||
workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
|
||||
if (!workload)
|
||||
return -ENOMEM;
|
||||
|
||||
/* record some ring buffer register values for scan and shadow */
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(rb_start.val), &start, 4);
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
|
||||
|
||||
INIT_LIST_HEAD(&workload->list);
|
||||
INIT_LIST_HEAD(&workload->shadow_bb);
|
||||
|
||||
init_waitqueue_head(&workload->shadow_ctx_status_wq);
|
||||
atomic_set(&workload->shadow_ctx_active, 0);
|
||||
|
||||
workload->vgpu = vgpu;
|
||||
workload->ring_id = ring_id;
|
||||
workload->ctx_desc = *desc;
|
||||
workload->ring_context_gpa = ring_context_gpa;
|
||||
workload->rb_head = head;
|
||||
workload->rb_tail = tail;
|
||||
workload->rb_start = start;
|
||||
workload->rb_ctl = ctl;
|
||||
workload->prepare = prepare_execlist_workload;
|
||||
workload->complete = complete_execlist_workload;
|
||||
workload->status = -EINPROGRESS;
|
||||
workload->emulate_schedule_in = emulate_schedule_in;
|
||||
|
||||
if (ring_id == RCS) {
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
|
||||
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
|
||||
|
||||
workload->wa_ctx.indirect_ctx.guest_gma =
|
||||
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
|
||||
workload->wa_ctx.indirect_ctx.size =
|
||||
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
|
||||
CACHELINE_BYTES;
|
||||
workload->wa_ctx.per_ctx.guest_gma =
|
||||
per_ctx & PER_CTX_ADDR_MASK;
|
||||
workload->wa_ctx.workload = workload;
|
||||
|
||||
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
|
||||
}
|
||||
|
||||
if (emulate_schedule_in)
|
||||
memcpy(&workload->elsp_dwords,
|
||||
&vgpu->execlist[ring_id].elsp_dwords,
|
||||
sizeof(workload->elsp_dwords));
|
||||
|
||||
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
|
||||
workload, ring_id, head, tail, start, ctl);
|
||||
|
||||
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
|
||||
emulate_schedule_in);
|
||||
|
||||
ret = prepare_mm(workload);
|
||||
if (ret) {
|
||||
kmem_cache_free(vgpu->workloads, workload);
|
||||
return ret;
|
||||
}
|
||||
|
||||
queue_workload(workload);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
|
||||
struct execlist_ctx_descriptor_format *desc[2], valid_desc[2];
|
||||
unsigned long valid_desc_bitmap = 0;
|
||||
bool emulate_schedule_in = true;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
memset(valid_desc, 0, sizeof(valid_desc));
|
||||
|
||||
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
|
||||
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
if (!desc[i]->valid)
|
||||
continue;
|
||||
|
||||
if (!desc[i]->privilege_access) {
|
||||
gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
|
||||
vgpu->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* TODO: add another guest context checks here. */
|
||||
set_bit(i, &valid_desc_bitmap);
|
||||
valid_desc[i] = *desc[i];
|
||||
}
|
||||
|
||||
if (!valid_desc_bitmap) {
|
||||
gvt_err("vgpu%d: no valid desc in a elsp submission\n",
|
||||
vgpu->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!test_bit(0, (void *)&valid_desc_bitmap) &&
|
||||
test_bit(1, (void *)&valid_desc_bitmap)) {
|
||||
gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
|
||||
vgpu->id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* submit workload */
|
||||
for_each_set_bit(i, (void *)&valid_desc_bitmap, 2) {
|
||||
ret = submit_context(vgpu, ring_id, &valid_desc[i],
|
||||
emulate_schedule_in);
|
||||
if (ret) {
|
||||
gvt_err("vgpu%d: fail to schedule workload\n",
|
||||
vgpu->id);
|
||||
return ret;
|
||||
}
|
||||
emulate_schedule_in = false;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
|
||||
struct execlist_context_status_pointer_format ctx_status_ptr;
|
||||
u32 ctx_status_ptr_reg;
|
||||
|
||||
memset(execlist, 0, sizeof(*execlist));
|
||||
|
||||
execlist->vgpu = vgpu;
|
||||
execlist->ring_id = ring_id;
|
||||
execlist->slot[0].index = 0;
|
||||
execlist->slot[1].index = 1;
|
||||
|
||||
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
|
||||
_EL_OFFSET_STATUS_PTR);
|
||||
|
||||
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
|
||||
ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
|
||||
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
|
||||
}
|
||||
|
||||
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
|
||||
{
|
||||
kmem_cache_destroy(vgpu->workloads);
|
||||
}
|
||||
|
||||
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* each ring has a virtual execlist engine */
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
init_vgpu_execlist(vgpu, i);
|
||||
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
|
||||
}
|
||||
|
||||
vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
|
||||
sizeof(struct intel_vgpu_workload), 0,
|
||||
SLAB_HWCACHE_ALIGN,
|
||||
NULL);
|
||||
|
||||
if (!vgpu->workloads)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
|
||||
unsigned long ring_bitmap)
|
||||
{
|
||||
int bit;
|
||||
struct list_head *pos, *n;
|
||||
struct intel_vgpu_workload *workload = NULL;
|
||||
|
||||
for_each_set_bit(bit, &ring_bitmap, sizeof(ring_bitmap) * 8) {
|
||||
if (bit >= I915_NUM_ENGINES)
|
||||
break;
|
||||
/* free the unsubmited workload in the queue */
|
||||
list_for_each_safe(pos, n, &vgpu->workload_q_head[bit]) {
|
||||
workload = container_of(pos,
|
||||
struct intel_vgpu_workload, list);
|
||||
list_del_init(&workload->list);
|
||||
free_workload(workload);
|
||||
}
|
||||
|
||||
init_vgpu_execlist(vgpu, bit);
|
||||
}
|
||||
}
|
||||
188
drivers/gpu/drm/i915/gvt/execlist.h
Normal file
188
drivers/gpu/drm/i915/gvt/execlist.h
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhiyuan Lv <zhiyuan.lv@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_EXECLIST_H_
|
||||
#define _GVT_EXECLIST_H_
|
||||
|
||||
struct execlist_ctx_descriptor_format {
|
||||
union {
|
||||
u32 udw;
|
||||
u32 context_id;
|
||||
};
|
||||
union {
|
||||
u32 ldw;
|
||||
struct {
|
||||
u32 valid : 1;
|
||||
u32 force_pd_restore : 1;
|
||||
u32 force_restore : 1;
|
||||
u32 addressing_mode : 2;
|
||||
u32 llc_coherency : 1;
|
||||
u32 fault_handling : 2;
|
||||
u32 privilege_access : 1;
|
||||
u32 reserved : 3;
|
||||
u32 lrca : 20;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct execlist_status_format {
|
||||
union {
|
||||
u32 ldw;
|
||||
struct {
|
||||
u32 current_execlist_pointer :1;
|
||||
u32 execlist_write_pointer :1;
|
||||
u32 execlist_queue_full :1;
|
||||
u32 execlist_1_valid :1;
|
||||
u32 execlist_0_valid :1;
|
||||
u32 last_ctx_switch_reason :9;
|
||||
u32 current_active_elm_status :2;
|
||||
u32 arbitration_enable :1;
|
||||
u32 execlist_1_active :1;
|
||||
u32 execlist_0_active :1;
|
||||
u32 reserved :13;
|
||||
};
|
||||
};
|
||||
union {
|
||||
u32 udw;
|
||||
u32 context_id;
|
||||
};
|
||||
};
|
||||
|
||||
struct execlist_context_status_pointer_format {
|
||||
union {
|
||||
u32 dw;
|
||||
struct {
|
||||
u32 write_ptr :3;
|
||||
u32 reserved :5;
|
||||
u32 read_ptr :3;
|
||||
u32 reserved2 :5;
|
||||
u32 mask :16;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct execlist_context_status_format {
|
||||
union {
|
||||
u32 ldw;
|
||||
struct {
|
||||
u32 idle_to_active :1;
|
||||
u32 preempted :1;
|
||||
u32 element_switch :1;
|
||||
u32 active_to_idle :1;
|
||||
u32 context_complete :1;
|
||||
u32 wait_on_sync_flip :1;
|
||||
u32 wait_on_vblank :1;
|
||||
u32 wait_on_semaphore :1;
|
||||
u32 wait_on_scanline :1;
|
||||
u32 reserved :2;
|
||||
u32 semaphore_wait_mode :1;
|
||||
u32 display_plane :3;
|
||||
u32 lite_restore :1;
|
||||
u32 reserved_2 :16;
|
||||
};
|
||||
};
|
||||
union {
|
||||
u32 udw;
|
||||
u32 context_id;
|
||||
};
|
||||
};
|
||||
|
||||
struct execlist_mmio_pair {
|
||||
u32 addr;
|
||||
u32 val;
|
||||
};
|
||||
|
||||
/* The first 52 dwords in register state context */
|
||||
struct execlist_ring_context {
|
||||
u32 nop1;
|
||||
u32 lri_cmd_1;
|
||||
struct execlist_mmio_pair ctx_ctrl;
|
||||
struct execlist_mmio_pair ring_header;
|
||||
struct execlist_mmio_pair ring_tail;
|
||||
struct execlist_mmio_pair rb_start;
|
||||
struct execlist_mmio_pair rb_ctrl;
|
||||
struct execlist_mmio_pair bb_cur_head_UDW;
|
||||
struct execlist_mmio_pair bb_cur_head_LDW;
|
||||
struct execlist_mmio_pair bb_state;
|
||||
struct execlist_mmio_pair second_bb_addr_UDW;
|
||||
struct execlist_mmio_pair second_bb_addr_LDW;
|
||||
struct execlist_mmio_pair second_bb_state;
|
||||
struct execlist_mmio_pair bb_per_ctx_ptr;
|
||||
struct execlist_mmio_pair rcs_indirect_ctx;
|
||||
struct execlist_mmio_pair rcs_indirect_ctx_offset;
|
||||
u32 nop2;
|
||||
u32 nop3;
|
||||
u32 nop4;
|
||||
u32 lri_cmd_2;
|
||||
struct execlist_mmio_pair ctx_timestamp;
|
||||
struct execlist_mmio_pair pdp3_UDW;
|
||||
struct execlist_mmio_pair pdp3_LDW;
|
||||
struct execlist_mmio_pair pdp2_UDW;
|
||||
struct execlist_mmio_pair pdp2_LDW;
|
||||
struct execlist_mmio_pair pdp1_UDW;
|
||||
struct execlist_mmio_pair pdp1_LDW;
|
||||
struct execlist_mmio_pair pdp0_UDW;
|
||||
struct execlist_mmio_pair pdp0_LDW;
|
||||
};
|
||||
|
||||
struct intel_vgpu_elsp_dwords {
|
||||
u32 data[4];
|
||||
u32 index;
|
||||
};
|
||||
|
||||
struct intel_vgpu_execlist_slot {
|
||||
struct execlist_ctx_descriptor_format ctx[2];
|
||||
u32 index;
|
||||
};
|
||||
|
||||
struct intel_vgpu_execlist {
|
||||
struct intel_vgpu_execlist_slot slot[2];
|
||||
struct intel_vgpu_execlist_slot *running_slot;
|
||||
struct intel_vgpu_execlist_slot *pending_slot;
|
||||
struct execlist_ctx_descriptor_format *running_context;
|
||||
int ring_id;
|
||||
struct intel_vgpu *vgpu;
|
||||
struct intel_vgpu_elsp_dwords elsp_dwords;
|
||||
};
|
||||
|
||||
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
|
||||
|
||||
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
|
||||
unsigned long ring_bitmap);
|
||||
|
||||
#endif /*_GVT_EXECLIST_H_*/
|
||||
308
drivers/gpu/drm/i915/gvt/firmware.c
Normal file
308
drivers/gpu/drm/i915/gvt/firmware.c
Normal file
@@ -0,0 +1,308 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/crc32.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define FIRMWARE_VERSION (0x0)
|
||||
|
||||
struct gvt_firmware_header {
|
||||
u64 magic;
|
||||
u32 crc32; /* protect the data after this field */
|
||||
u32 version;
|
||||
u64 cfg_space_size;
|
||||
u64 cfg_space_offset; /* offset in the file */
|
||||
u64 mmio_size;
|
||||
u64 mmio_offset; /* offset in the file */
|
||||
unsigned char data[1];
|
||||
};
|
||||
|
||||
#define RD(offset) (readl(mmio + offset.reg))
|
||||
#define WR(v, offset) (writel(v, mmio + offset.reg))
|
||||
|
||||
static void bdw_forcewake_get(void *mmio)
|
||||
{
|
||||
WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
|
||||
|
||||
RD(ECOBUS);
|
||||
|
||||
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
|
||||
gvt_err("fail to wait forcewake idle\n");
|
||||
|
||||
WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
|
||||
|
||||
if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
|
||||
gvt_err("fail to wait forcewake ack\n");
|
||||
|
||||
if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
|
||||
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
|
||||
gvt_err("fail to wait c0 wake up\n");
|
||||
}
|
||||
|
||||
#undef RD
|
||||
#undef WR
|
||||
|
||||
#define dev_to_drm_minor(d) dev_get_drvdata((d))
|
||||
|
||||
static ssize_t
|
||||
gvt_firmware_read(struct file *filp, struct kobject *kobj,
|
||||
struct bin_attribute *attr, char *buf,
|
||||
loff_t offset, size_t count)
|
||||
{
|
||||
memcpy(buf, attr->private + offset, count);
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct bin_attribute firmware_attr = {
|
||||
.attr = {.name = "gvt_firmware", .mode = (S_IRUSR)},
|
||||
.read = gvt_firmware_read,
|
||||
.write = NULL,
|
||||
.mmap = NULL,
|
||||
};
|
||||
|
||||
static int expose_firmware_sysfs(struct intel_gvt *gvt, void *mmio)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
|
||||
struct intel_gvt_mmio_info *e;
|
||||
struct gvt_firmware_header *h;
|
||||
void *firmware;
|
||||
void *p;
|
||||
unsigned long size;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
|
||||
firmware = vmalloc(size);
|
||||
if (!firmware)
|
||||
return -ENOMEM;
|
||||
|
||||
h = firmware;
|
||||
|
||||
h->magic = VGT_MAGIC;
|
||||
h->version = FIRMWARE_VERSION;
|
||||
h->cfg_space_size = info->cfg_space_size;
|
||||
h->cfg_space_offset = offsetof(struct gvt_firmware_header, data);
|
||||
h->mmio_size = info->mmio_size;
|
||||
h->mmio_offset = h->cfg_space_offset + h->cfg_space_size;
|
||||
|
||||
p = firmware + h->cfg_space_offset;
|
||||
|
||||
for (i = 0; i < h->cfg_space_size; i += 4)
|
||||
pci_read_config_dword(pdev, i, p + i);
|
||||
|
||||
memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
|
||||
|
||||
p = firmware + h->mmio_offset;
|
||||
|
||||
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
|
||||
int j;
|
||||
|
||||
for (j = 0; j < e->length; j += 4)
|
||||
*(u32 *)(p + e->offset + j) =
|
||||
readl(mmio + e->offset + j);
|
||||
}
|
||||
|
||||
memcpy(gvt->firmware.mmio, p, info->mmio_size);
|
||||
|
||||
firmware_attr.size = size;
|
||||
firmware_attr.private = firmware;
|
||||
|
||||
ret = device_create_bin_file(&pdev->dev, &firmware_attr);
|
||||
if (ret) {
|
||||
vfree(firmware);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void clean_firmware_sysfs(struct intel_gvt *gvt)
|
||||
{
|
||||
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
|
||||
|
||||
device_remove_bin_file(&pdev->dev, &firmware_attr);
|
||||
vfree(firmware_attr.private);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_free_firmware - free GVT firmware
|
||||
* @gvt: intel gvt device
|
||||
*
|
||||
*/
|
||||
void intel_gvt_free_firmware(struct intel_gvt *gvt)
|
||||
{
|
||||
if (!gvt->firmware.firmware_loaded)
|
||||
clean_firmware_sysfs(gvt);
|
||||
|
||||
kfree(gvt->firmware.cfg_space);
|
||||
kfree(gvt->firmware.mmio);
|
||||
}
|
||||
|
||||
static int verify_firmware(struct intel_gvt *gvt,
|
||||
const struct firmware *fw)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct gvt_firmware_header *h;
|
||||
unsigned long id, crc32_start;
|
||||
const void *mem;
|
||||
const char *item;
|
||||
u64 file, request;
|
||||
|
||||
h = (struct gvt_firmware_header *)fw->data;
|
||||
|
||||
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
|
||||
mem = fw->data + crc32_start;
|
||||
|
||||
#define VERIFY(s, a, b) do { \
|
||||
item = (s); file = (u64)(a); request = (u64)(b); \
|
||||
if ((a) != (b)) \
|
||||
goto invalid_firmware; \
|
||||
} while (0)
|
||||
|
||||
VERIFY("magic number", h->magic, VGT_MAGIC);
|
||||
VERIFY("version", h->version, FIRMWARE_VERSION);
|
||||
VERIFY("crc32", h->crc32, crc32_le(0, mem, fw->size - crc32_start));
|
||||
VERIFY("cfg space size", h->cfg_space_size, info->cfg_space_size);
|
||||
VERIFY("mmio size", h->mmio_size, info->mmio_size);
|
||||
|
||||
mem = (fw->data + h->cfg_space_offset);
|
||||
|
||||
id = *(u16 *)(mem + PCI_VENDOR_ID);
|
||||
VERIFY("vender id", id, pdev->vendor);
|
||||
|
||||
id = *(u16 *)(mem + PCI_DEVICE_ID);
|
||||
VERIFY("device id", id, pdev->device);
|
||||
|
||||
id = *(u8 *)(mem + PCI_REVISION_ID);
|
||||
VERIFY("revision id", id, pdev->revision);
|
||||
|
||||
#undef VERIFY
|
||||
return 0;
|
||||
|
||||
invalid_firmware:
|
||||
gvt_dbg_core("Invalid firmware: %s [file] 0x%llx [request] 0x%llx\n",
|
||||
item, file, request);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#define GVT_FIRMWARE_PATH "i915/gvt"
|
||||
|
||||
/**
|
||||
* intel_gvt_load_firmware - load GVT firmware
|
||||
* @gvt: intel gvt device
|
||||
*
|
||||
*/
|
||||
int intel_gvt_load_firmware(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
struct drm_i915_private *dev_priv = gvt->dev_priv;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct intel_gvt_firmware *firmware = &gvt->firmware;
|
||||
struct gvt_firmware_header *h;
|
||||
const struct firmware *fw;
|
||||
char *path;
|
||||
void *mmio, *mem;
|
||||
int ret;
|
||||
|
||||
path = kmalloc(PATH_MAX, GFP_KERNEL);
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
|
||||
mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
|
||||
if (!mem) {
|
||||
kfree(path);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
firmware->cfg_space = mem;
|
||||
|
||||
mem = kmalloc(info->mmio_size, GFP_KERNEL);
|
||||
if (!mem) {
|
||||
kfree(path);
|
||||
kfree(firmware->cfg_space);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
firmware->mmio = mem;
|
||||
|
||||
mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
|
||||
if (!mmio) {
|
||||
kfree(path);
|
||||
kfree(firmware->cfg_space);
|
||||
kfree(firmware->mmio);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
|
||||
bdw_forcewake_get(mmio);
|
||||
|
||||
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
|
||||
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
|
||||
pdev->revision);
|
||||
|
||||
gvt_dbg_core("request hw state firmware %s...\n", path);
|
||||
|
||||
ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
|
||||
kfree(path);
|
||||
|
||||
if (ret)
|
||||
goto expose_firmware;
|
||||
|
||||
gvt_dbg_core("success.\n");
|
||||
|
||||
ret = verify_firmware(gvt, fw);
|
||||
if (ret)
|
||||
goto out_free_fw;
|
||||
|
||||
gvt_dbg_core("verified.\n");
|
||||
|
||||
h = (struct gvt_firmware_header *)fw->data;
|
||||
|
||||
memcpy(firmware->cfg_space, fw->data + h->cfg_space_offset,
|
||||
h->cfg_space_size);
|
||||
memcpy(firmware->mmio, fw->data + h->mmio_offset,
|
||||
h->mmio_size);
|
||||
|
||||
release_firmware(fw);
|
||||
firmware->firmware_loaded = true;
|
||||
pci_iounmap(pdev, mmio);
|
||||
return 0;
|
||||
|
||||
out_free_fw:
|
||||
release_firmware(fw);
|
||||
expose_firmware:
|
||||
expose_firmware_sysfs(gvt, mmio);
|
||||
pci_iounmap(pdev, mmio);
|
||||
return 0;
|
||||
}
|
||||
2231
drivers/gpu/drm/i915/gvt/gtt.c
Normal file
2231
drivers/gpu/drm/i915/gvt/gtt.c
Normal file
File diff suppressed because it is too large
Load Diff
270
drivers/gpu/drm/i915/gvt/gtt.h
Normal file
270
drivers/gpu/drm/i915/gvt/gtt.h
Normal file
@@ -0,0 +1,270 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
* Xiao Zheng <xiao.zheng@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_GTT_H_
|
||||
#define _GVT_GTT_H_
|
||||
|
||||
#define GTT_PAGE_SHIFT 12
|
||||
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
|
||||
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
|
||||
|
||||
struct intel_vgpu_mm;
|
||||
|
||||
#define INTEL_GVT_GTT_HASH_BITS 8
|
||||
#define INTEL_GVT_INVALID_ADDR (~0UL)
|
||||
|
||||
struct intel_gvt_gtt_entry {
|
||||
u64 val64;
|
||||
int type;
|
||||
};
|
||||
|
||||
struct intel_gvt_gtt_pte_ops {
|
||||
struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
|
||||
struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||
struct intel_vgpu *vgpu);
|
||||
bool (*test_present)(struct intel_gvt_gtt_entry *e);
|
||||
void (*clear_present)(struct intel_gvt_gtt_entry *e);
|
||||
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
|
||||
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
|
||||
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
|
||||
};
|
||||
|
||||
struct intel_gvt_gtt_gma_ops {
|
||||
unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
|
||||
unsigned long (*gma_to_pte_index)(unsigned long gma);
|
||||
unsigned long (*gma_to_pde_index)(unsigned long gma);
|
||||
unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
|
||||
unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
|
||||
unsigned long (*gma_to_pml4_index)(unsigned long gma);
|
||||
};
|
||||
|
||||
struct intel_gvt_gtt {
|
||||
struct intel_gvt_gtt_pte_ops *pte_ops;
|
||||
struct intel_gvt_gtt_gma_ops *gma_ops;
|
||||
int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
|
||||
void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
|
||||
struct list_head oos_page_use_list_head;
|
||||
struct list_head oos_page_free_list_head;
|
||||
struct list_head mm_lru_list_head;
|
||||
};
|
||||
|
||||
enum {
|
||||
INTEL_GVT_MM_GGTT = 0,
|
||||
INTEL_GVT_MM_PPGTT,
|
||||
};
|
||||
|
||||
struct intel_vgpu_mm {
|
||||
int type;
|
||||
bool initialized;
|
||||
bool shadowed;
|
||||
|
||||
int page_table_entry_type;
|
||||
u32 page_table_entry_size;
|
||||
u32 page_table_entry_cnt;
|
||||
void *virtual_page_table;
|
||||
void *shadow_page_table;
|
||||
|
||||
int page_table_level;
|
||||
bool has_shadow_page_table;
|
||||
u32 pde_base_index;
|
||||
|
||||
struct list_head list;
|
||||
struct kref ref;
|
||||
atomic_t pincount;
|
||||
struct list_head lru_list;
|
||||
struct intel_vgpu *vgpu;
|
||||
};
|
||||
|
||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
|
||||
struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index);
|
||||
|
||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
|
||||
struct intel_vgpu_mm *mm,
|
||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||
unsigned long index);
|
||||
|
||||
#define ggtt_get_guest_entry(mm, e, index) \
|
||||
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
|
||||
|
||||
#define ggtt_set_guest_entry(mm, e, index) \
|
||||
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
|
||||
|
||||
#define ggtt_get_shadow_entry(mm, e, index) \
|
||||
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
|
||||
|
||||
#define ggtt_set_shadow_entry(mm, e, index) \
|
||||
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
|
||||
|
||||
#define ppgtt_get_guest_root_entry(mm, e, index) \
|
||||
intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
|
||||
|
||||
#define ppgtt_set_guest_root_entry(mm, e, index) \
|
||||
intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
|
||||
|
||||
#define ppgtt_get_shadow_root_entry(mm, e, index) \
|
||||
intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
|
||||
|
||||
#define ppgtt_set_shadow_root_entry(mm, e, index) \
|
||||
intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
|
||||
|
||||
extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
|
||||
int mm_type, void *virtual_page_table, int page_table_level,
|
||||
u32 pde_base_index);
|
||||
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
|
||||
|
||||
struct intel_vgpu_guest_page;
|
||||
|
||||
struct intel_vgpu_gtt {
|
||||
struct intel_vgpu_mm *ggtt_mm;
|
||||
unsigned long active_ppgtt_mm_bitmap;
|
||||
struct list_head mm_list_head;
|
||||
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
|
||||
DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
|
||||
atomic_t n_write_protected_guest_page;
|
||||
struct list_head oos_page_list_head;
|
||||
struct list_head post_shadow_list_head;
|
||||
struct page *scratch_page;
|
||||
unsigned long scratch_page_mfn;
|
||||
};
|
||||
|
||||
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
|
||||
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
|
||||
|
||||
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
|
||||
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
|
||||
|
||||
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
int page_table_level, void *root_entry);
|
||||
|
||||
struct intel_vgpu_oos_page;
|
||||
|
||||
struct intel_vgpu_shadow_page {
|
||||
void *vaddr;
|
||||
struct page *page;
|
||||
int type;
|
||||
struct hlist_node node;
|
||||
unsigned long mfn;
|
||||
};
|
||||
|
||||
struct intel_vgpu_guest_page {
|
||||
struct hlist_node node;
|
||||
bool writeprotection;
|
||||
unsigned long gfn;
|
||||
int (*handler)(void *, u64, void *, int);
|
||||
void *data;
|
||||
unsigned long write_cnt;
|
||||
struct intel_vgpu_oos_page *oos_page;
|
||||
};
|
||||
|
||||
struct intel_vgpu_oos_page {
|
||||
struct intel_vgpu_guest_page *guest_page;
|
||||
struct list_head list;
|
||||
struct list_head vm_list;
|
||||
int id;
|
||||
unsigned char mem[GTT_PAGE_SIZE];
|
||||
};
|
||||
|
||||
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
|
||||
|
||||
struct intel_vgpu_ppgtt_spt {
|
||||
struct intel_vgpu_shadow_page shadow_page;
|
||||
struct intel_vgpu_guest_page guest_page;
|
||||
int guest_page_type;
|
||||
atomic_t refcount;
|
||||
struct intel_vgpu *vgpu;
|
||||
DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
|
||||
struct list_head post_shadow_list;
|
||||
};
|
||||
|
||||
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *guest_page,
|
||||
unsigned long gfn,
|
||||
int (*handler)(void *gp, u64, void *, int),
|
||||
void *data);
|
||||
|
||||
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *guest_page);
|
||||
|
||||
int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *guest_page);
|
||||
|
||||
void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *guest_page);
|
||||
|
||||
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
|
||||
struct intel_vgpu *vgpu, unsigned long gfn);
|
||||
|
||||
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);
|
||||
|
||||
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);
|
||||
|
||||
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
|
||||
{
|
||||
kref_get(&mm->ref);
|
||||
}
|
||||
|
||||
static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
|
||||
{
|
||||
kref_put(&mm->ref, intel_vgpu_destroy_mm);
|
||||
}
|
||||
|
||||
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
|
||||
|
||||
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
|
||||
|
||||
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
|
||||
unsigned long gma);
|
||||
|
||||
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
int page_table_level, void *root_entry);
|
||||
|
||||
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
int page_table_level);
|
||||
|
||||
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
|
||||
int page_table_level);
|
||||
|
||||
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
|
||||
unsigned int off, void *p_data, unsigned int bytes);
|
||||
|
||||
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
|
||||
unsigned int off, void *p_data, unsigned int bytes);
|
||||
|
||||
#endif /* _GVT_GTT_H_ */
|
||||
@@ -19,10 +19,20 @@
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Niu Bing <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <xen/xen.h>
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
@@ -33,6 +43,13 @@ static const char * const supported_hypervisors[] = {
|
||||
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
|
||||
};
|
||||
|
||||
struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
|
||||
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
|
||||
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
|
||||
.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
|
||||
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
|
||||
* @gvt: intel gvt device
|
||||
@@ -84,9 +101,66 @@ int intel_gvt_init_host(void)
|
||||
|
||||
static void init_device_info(struct intel_gvt *gvt)
|
||||
{
|
||||
if (IS_BROADWELL(gvt->dev_priv))
|
||||
gvt->device_info.max_support_vgpus = 8;
|
||||
/* This function will grow large in GVT device model patches. */
|
||||
struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
|
||||
info->max_support_vgpus = 8;
|
||||
info->cfg_space_size = 256;
|
||||
info->mmio_size = 2 * 1024 * 1024;
|
||||
info->mmio_bar = 0;
|
||||
info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
|
||||
info->gtt_start_offset = 8 * 1024 * 1024;
|
||||
info->gtt_entry_size = 8;
|
||||
info->gtt_entry_size_shift = 3;
|
||||
info->gmadr_bytes_in_cmd = 8;
|
||||
info->max_surface_size = 36 * 1024 * 1024;
|
||||
}
|
||||
}
|
||||
|
||||
static int gvt_service_thread(void *data)
|
||||
{
|
||||
struct intel_gvt *gvt = (struct intel_gvt *)data;
|
||||
int ret;
|
||||
|
||||
gvt_dbg_core("service thread start\n");
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
ret = wait_event_interruptible(gvt->service_thread_wq,
|
||||
kthread_should_stop() || gvt->service_request);
|
||||
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
|
||||
continue;
|
||||
|
||||
if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
|
||||
(void *)&gvt->service_request)) {
|
||||
mutex_lock(&gvt->lock);
|
||||
intel_gvt_emulate_vblank(gvt);
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void clean_service_thread(struct intel_gvt *gvt)
|
||||
{
|
||||
kthread_stop(gvt->service_thread);
|
||||
}
|
||||
|
||||
static int init_service_thread(struct intel_gvt *gvt)
|
||||
{
|
||||
init_waitqueue_head(&gvt->service_thread_wq);
|
||||
|
||||
gvt->service_thread = kthread_run(gvt_service_thread,
|
||||
gvt, "gvt_service_thread");
|
||||
if (IS_ERR(gvt->service_thread)) {
|
||||
gvt_err("fail to start service thread.\n");
|
||||
return PTR_ERR(gvt->service_thread);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -104,7 +178,15 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
|
||||
if (WARN_ON(!gvt->initialized))
|
||||
return;
|
||||
|
||||
/* Other de-initialization of GVT components will be introduced. */
|
||||
clean_service_thread(gvt);
|
||||
intel_gvt_clean_cmd_parser(gvt);
|
||||
intel_gvt_clean_sched_policy(gvt);
|
||||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
intel_gvt_clean_opregion(gvt);
|
||||
intel_gvt_clean_gtt(gvt);
|
||||
intel_gvt_clean_irq(gvt);
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
intel_gvt_free_firmware(gvt);
|
||||
|
||||
gvt->initialized = false;
|
||||
}
|
||||
@@ -123,6 +205,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
|
||||
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_gvt *gvt = &dev_priv->gvt;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Cannot initialize GVT device without intel_gvt_host gets
|
||||
* initialized first.
|
||||
@@ -135,11 +219,66 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
|
||||
|
||||
gvt_dbg_core("init gvt device\n");
|
||||
|
||||
mutex_init(&gvt->lock);
|
||||
gvt->dev_priv = dev_priv;
|
||||
|
||||
init_device_info(gvt);
|
||||
/*
|
||||
* Other initialization of GVT components will be introduce here.
|
||||
*/
|
||||
|
||||
ret = intel_gvt_setup_mmio_info(gvt);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = intel_gvt_load_firmware(gvt);
|
||||
if (ret)
|
||||
goto out_clean_mmio_info;
|
||||
|
||||
ret = intel_gvt_init_irq(gvt);
|
||||
if (ret)
|
||||
goto out_free_firmware;
|
||||
|
||||
ret = intel_gvt_init_gtt(gvt);
|
||||
if (ret)
|
||||
goto out_clean_irq;
|
||||
|
||||
ret = intel_gvt_init_opregion(gvt);
|
||||
if (ret)
|
||||
goto out_clean_gtt;
|
||||
|
||||
ret = intel_gvt_init_workload_scheduler(gvt);
|
||||
if (ret)
|
||||
goto out_clean_opregion;
|
||||
|
||||
ret = intel_gvt_init_sched_policy(gvt);
|
||||
if (ret)
|
||||
goto out_clean_workload_scheduler;
|
||||
|
||||
ret = intel_gvt_init_cmd_parser(gvt);
|
||||
if (ret)
|
||||
goto out_clean_sched_policy;
|
||||
|
||||
ret = init_service_thread(gvt);
|
||||
if (ret)
|
||||
goto out_clean_cmd_parser;
|
||||
|
||||
gvt_dbg_core("gvt device creation is done\n");
|
||||
gvt->initialized = true;
|
||||
return 0;
|
||||
|
||||
out_clean_cmd_parser:
|
||||
intel_gvt_clean_cmd_parser(gvt);
|
||||
out_clean_sched_policy:
|
||||
intel_gvt_clean_sched_policy(gvt);
|
||||
out_clean_workload_scheduler:
|
||||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
out_clean_opregion:
|
||||
intel_gvt_clean_opregion(gvt);
|
||||
out_clean_gtt:
|
||||
intel_gvt_clean_gtt(gvt);
|
||||
out_clean_irq:
|
||||
intel_gvt_clean_irq(gvt);
|
||||
out_free_firmware:
|
||||
intel_gvt_free_firmware(gvt);
|
||||
out_clean_mmio_info:
|
||||
intel_gvt_clean_mmio_info(gvt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -19,6 +19,15 @@
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Niu Bing <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_H_
|
||||
@@ -26,6 +35,17 @@
|
||||
|
||||
#include "debug.h"
|
||||
#include "hypercall.h"
|
||||
#include "mmio.h"
|
||||
#include "reg.h"
|
||||
#include "interrupt.h"
|
||||
#include "gtt.h"
|
||||
#include "display.h"
|
||||
#include "edid.h"
|
||||
#include "execlist.h"
|
||||
#include "scheduler.h"
|
||||
#include "sched_policy.h"
|
||||
#include "render.h"
|
||||
#include "cmd_parser.h"
|
||||
|
||||
#define GVT_MAX_VGPU 8
|
||||
|
||||
@@ -45,13 +65,129 @@ extern struct intel_gvt_host intel_gvt_host;
|
||||
/* Describe per-platform limitations. */
|
||||
struct intel_gvt_device_info {
|
||||
u32 max_support_vgpus;
|
||||
/* This data structure will grow bigger in GVT device model patches */
|
||||
u32 cfg_space_size;
|
||||
u32 mmio_size;
|
||||
u32 mmio_bar;
|
||||
unsigned long msi_cap_offset;
|
||||
u32 gtt_start_offset;
|
||||
u32 gtt_entry_size;
|
||||
u32 gtt_entry_size_shift;
|
||||
int gmadr_bytes_in_cmd;
|
||||
u32 max_surface_size;
|
||||
};
|
||||
|
||||
/* GM resources owned by a vGPU */
|
||||
struct intel_vgpu_gm {
|
||||
u64 aperture_sz;
|
||||
u64 hidden_sz;
|
||||
struct drm_mm_node low_gm_node;
|
||||
struct drm_mm_node high_gm_node;
|
||||
};
|
||||
|
||||
#define INTEL_GVT_MAX_NUM_FENCES 32
|
||||
|
||||
/* Fences owned by a vGPU */
|
||||
struct intel_vgpu_fence {
|
||||
struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
|
||||
u32 base;
|
||||
u32 size;
|
||||
};
|
||||
|
||||
struct intel_vgpu_mmio {
|
||||
void *vreg;
|
||||
void *sreg;
|
||||
bool disable_warn_untrack;
|
||||
};
|
||||
|
||||
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
|
||||
#define INTEL_GVT_MAX_BAR_NUM 4
|
||||
|
||||
struct intel_vgpu_pci_bar {
|
||||
u64 size;
|
||||
bool tracked;
|
||||
};
|
||||
|
||||
struct intel_vgpu_cfg_space {
|
||||
unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
|
||||
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
|
||||
};
|
||||
|
||||
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
|
||||
|
||||
#define INTEL_GVT_MAX_PIPE 4
|
||||
|
||||
struct intel_vgpu_irq {
|
||||
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
|
||||
DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
|
||||
INTEL_GVT_EVENT_MAX);
|
||||
};
|
||||
|
||||
struct intel_vgpu_opregion {
|
||||
void *va;
|
||||
u32 gfn[INTEL_GVT_OPREGION_PAGES];
|
||||
struct page *pages[INTEL_GVT_OPREGION_PAGES];
|
||||
};
|
||||
|
||||
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
|
||||
|
||||
#define INTEL_GVT_MAX_PORT 5
|
||||
|
||||
struct intel_vgpu_display {
|
||||
struct intel_vgpu_i2c_edid i2c_edid;
|
||||
struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
|
||||
struct intel_vgpu_sbi sbi;
|
||||
};
|
||||
|
||||
struct intel_vgpu {
|
||||
struct intel_gvt *gvt;
|
||||
int id;
|
||||
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
|
||||
bool active;
|
||||
bool resetting;
|
||||
void *sched_data;
|
||||
|
||||
struct intel_vgpu_fence fence;
|
||||
struct intel_vgpu_gm gm;
|
||||
struct intel_vgpu_cfg_space cfg_space;
|
||||
struct intel_vgpu_mmio mmio;
|
||||
struct intel_vgpu_irq irq;
|
||||
struct intel_vgpu_gtt gtt;
|
||||
struct intel_vgpu_opregion opregion;
|
||||
struct intel_vgpu_display display;
|
||||
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
|
||||
struct list_head workload_q_head[I915_NUM_ENGINES];
|
||||
struct kmem_cache *workloads;
|
||||
atomic_t running_workload_num;
|
||||
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
|
||||
struct i915_gem_context *shadow_ctx;
|
||||
struct notifier_block shadow_ctx_notifier_block;
|
||||
};
|
||||
|
||||
struct intel_gvt_gm {
|
||||
unsigned long vgpu_allocated_low_gm_size;
|
||||
unsigned long vgpu_allocated_high_gm_size;
|
||||
};
|
||||
|
||||
struct intel_gvt_fence {
|
||||
unsigned long vgpu_allocated_fence_num;
|
||||
};
|
||||
|
||||
#define INTEL_GVT_MMIO_HASH_BITS 9
|
||||
|
||||
struct intel_gvt_mmio {
|
||||
u32 *mmio_attribute;
|
||||
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
|
||||
};
|
||||
|
||||
struct intel_gvt_firmware {
|
||||
void *cfg_space;
|
||||
void *mmio;
|
||||
bool firmware_loaded;
|
||||
};
|
||||
|
||||
struct intel_gvt_opregion {
|
||||
void *opregion_va;
|
||||
u32 opregion_pa;
|
||||
};
|
||||
|
||||
struct intel_gvt {
|
||||
@@ -62,8 +198,188 @@ struct intel_gvt {
|
||||
struct idr vgpu_idr; /* vGPU IDR pool */
|
||||
|
||||
struct intel_gvt_device_info device_info;
|
||||
struct intel_gvt_gm gm;
|
||||
struct intel_gvt_fence fence;
|
||||
struct intel_gvt_mmio mmio;
|
||||
struct intel_gvt_firmware firmware;
|
||||
struct intel_gvt_irq irq;
|
||||
struct intel_gvt_gtt gtt;
|
||||
struct intel_gvt_opregion opregion;
|
||||
struct intel_gvt_workload_scheduler scheduler;
|
||||
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
|
||||
|
||||
struct task_struct *service_thread;
|
||||
wait_queue_head_t service_thread_wq;
|
||||
unsigned long service_request;
|
||||
};
|
||||
|
||||
enum {
|
||||
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
|
||||
};
|
||||
|
||||
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
|
||||
int service)
|
||||
{
|
||||
set_bit(service, (void *)&gvt->service_request);
|
||||
wake_up(&gvt->service_thread_wq);
|
||||
}
|
||||
|
||||
void intel_gvt_free_firmware(struct intel_gvt *gvt);
|
||||
int intel_gvt_load_firmware(struct intel_gvt *gvt);
|
||||
|
||||
/* Aperture/GM space definitions for GVT device */
|
||||
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
|
||||
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
|
||||
|
||||
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
|
||||
#define gvt_ggtt_sz(gvt) \
|
||||
((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
|
||||
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
|
||||
|
||||
#define gvt_aperture_gmadr_base(gvt) (0)
|
||||
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
|
||||
+ gvt_aperture_sz(gvt) - 1)
|
||||
|
||||
#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
|
||||
+ gvt_aperture_sz(gvt))
|
||||
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
|
||||
+ gvt_hidden_sz(gvt) - 1)
|
||||
|
||||
#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
|
||||
|
||||
/* Aperture/GM space definitions for vGPU */
|
||||
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
|
||||
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
|
||||
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
|
||||
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)
|
||||
|
||||
#define vgpu_aperture_pa_base(vgpu) \
|
||||
(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))
|
||||
|
||||
#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)
|
||||
|
||||
#define vgpu_aperture_pa_end(vgpu) \
|
||||
(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
|
||||
|
||||
#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
|
||||
#define vgpu_aperture_gmadr_end(vgpu) \
|
||||
(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)
|
||||
|
||||
#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
|
||||
#define vgpu_hidden_gmadr_end(vgpu) \
|
||||
(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
|
||||
|
||||
#define vgpu_fence_base(vgpu) (vgpu->fence.base)
|
||||
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
|
||||
|
||||
struct intel_vgpu_creation_params {
|
||||
__u64 handle;
|
||||
__u64 low_gm_sz; /* in MB */
|
||||
__u64 high_gm_sz; /* in MB */
|
||||
__u64 fence_sz;
|
||||
__s32 primary;
|
||||
__u64 vgpu_id;
|
||||
};
|
||||
|
||||
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_creation_params *param);
|
||||
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
|
||||
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
|
||||
u32 fence, u64 value);
|
||||
|
||||
/* Macros for easily accessing vGPU virtual/shadow register */
|
||||
#define vgpu_vreg(vgpu, reg) \
|
||||
(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_vreg8(vgpu, reg) \
|
||||
(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_vreg16(vgpu, reg) \
|
||||
(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_vreg64(vgpu, reg) \
|
||||
(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_sreg(vgpu, reg) \
|
||||
(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_sreg8(vgpu, reg) \
|
||||
(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_sreg16(vgpu, reg) \
|
||||
(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
#define vgpu_sreg64(vgpu, reg) \
|
||||
(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
|
||||
|
||||
#define for_each_active_vgpu(gvt, vgpu, id) \
|
||||
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
|
||||
for_each_if(vgpu->active)
|
||||
|
||||
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
|
||||
u32 offset, u32 val, bool low)
|
||||
{
|
||||
u32 *pval;
|
||||
|
||||
/* BAR offset should be 32 bits algiend */
|
||||
offset = rounddown(offset, 4);
|
||||
pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
|
||||
|
||||
if (low) {
|
||||
/*
|
||||
* only update bit 31 - bit 4,
|
||||
* leave the bit 3 - bit 0 unchanged.
|
||||
*/
|
||||
*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
|
||||
}
|
||||
}
|
||||
|
||||
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
|
||||
struct intel_vgpu_creation_params *
|
||||
param);
|
||||
|
||||
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
|
||||
|
||||
/* validating GM functions */
|
||||
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
|
||||
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
|
||||
(gmadr <= vgpu_aperture_gmadr_end(vgpu)))
|
||||
|
||||
#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
|
||||
((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
|
||||
(gmadr <= vgpu_hidden_gmadr_end(vgpu)))
|
||||
|
||||
#define vgpu_gmadr_is_valid(vgpu, gmadr) \
|
||||
((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
|
||||
(vgpu_gmadr_is_hidden(vgpu, gmadr))))
|
||||
|
||||
#define gvt_gmadr_is_aperture(gvt, gmadr) \
|
||||
((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
|
||||
(gmadr <= gvt_aperture_gmadr_end(gvt)))
|
||||
|
||||
#define gvt_gmadr_is_hidden(gvt, gmadr) \
|
||||
((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
|
||||
(gmadr <= gvt_hidden_gmadr_end(gvt)))
|
||||
|
||||
#define gvt_gmadr_is_valid(gvt, gmadr) \
|
||||
(gvt_gmadr_is_aperture(gvt, gmadr) || \
|
||||
gvt_gmadr_is_hidden(gvt, gmadr))
|
||||
|
||||
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
|
||||
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
|
||||
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
|
||||
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
|
||||
unsigned long *h_index);
|
||||
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
|
||||
unsigned long *g_index);
|
||||
|
||||
int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes);
|
||||
|
||||
int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes);
|
||||
|
||||
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
|
||||
int intel_gvt_init_opregion(struct intel_gvt *gvt);
|
||||
|
||||
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
|
||||
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
|
||||
|
||||
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
|
||||
|
||||
#include "mpt.h"
|
||||
|
||||
#endif
|
||||
|
||||
2794
drivers/gpu/drm/i915/gvt/handlers.c
Normal file
2794
drivers/gpu/drm/i915/gvt/handlers.c
Normal file
File diff suppressed because it is too large
Load Diff
@@ -19,17 +19,51 @@
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Dexuan Cui
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_HYPERCALL_H_
|
||||
#define _GVT_HYPERCALL_H_
|
||||
|
||||
struct intel_gvt_io_emulation_ops {
|
||||
int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
|
||||
int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
|
||||
int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
|
||||
int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
|
||||
};
|
||||
|
||||
extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
|
||||
|
||||
/*
|
||||
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
|
||||
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
|
||||
*/
|
||||
struct intel_gvt_mpt {
|
||||
int (*detect_host)(void);
|
||||
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
|
||||
void (*detach_vgpu)(unsigned long handle);
|
||||
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
|
||||
unsigned long (*from_virt_to_mfn)(void *p);
|
||||
int (*set_wp_page)(unsigned long handle, u64 gfn);
|
||||
int (*unset_wp_page)(unsigned long handle, u64 gfn);
|
||||
int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
|
||||
unsigned long len);
|
||||
int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
|
||||
unsigned long len);
|
||||
unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
|
||||
int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
|
||||
unsigned long mfn, unsigned int nr, bool map,
|
||||
int type);
|
||||
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
|
||||
bool map);
|
||||
};
|
||||
|
||||
extern struct intel_gvt_mpt xengt_mpt;
|
||||
|
||||
740
drivers/gpu/drm/i915/gvt/interrupt.c
Normal file
740
drivers/gpu/drm/i915/gvt/interrupt.c
Normal file
@@ -0,0 +1,740 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min he <min.he@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
/* common offset among interrupt control registers */
|
||||
#define regbase_to_isr(base) (base)
|
||||
#define regbase_to_imr(base) (base + 0x4)
|
||||
#define regbase_to_iir(base) (base + 0x8)
|
||||
#define regbase_to_ier(base) (base + 0xC)
|
||||
|
||||
#define iir_to_regbase(iir) (iir - 0x8)
|
||||
#define ier_to_regbase(ier) (ier - 0xC)
|
||||
|
||||
#define get_event_virt_handler(irq, e) (irq->events[e].v_handler)
|
||||
#define get_irq_info(irq, e) (irq->events[e].info)
|
||||
|
||||
#define irq_to_gvt(irq) \
|
||||
container_of(irq, struct intel_gvt, irq)
|
||||
|
||||
static void update_upstream_irq(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_irq_info *info);
|
||||
|
||||
const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
|
||||
[RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
|
||||
[RCS_DEBUG] = "Render EU debug from SVG",
|
||||
[RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
|
||||
[RCS_CMD_STREAMER_ERR] = "Render CS error interrupt",
|
||||
[RCS_PIPE_CONTROL] = "Render PIPE CONTROL notify",
|
||||
[RCS_WATCHDOG_EXCEEDED] = "Render CS Watchdog counter exceeded",
|
||||
[RCS_PAGE_DIRECTORY_FAULT] = "Render page directory faults",
|
||||
[RCS_AS_CONTEXT_SWITCH] = "Render AS Context Switch Interrupt",
|
||||
|
||||
[VCS_MI_USER_INTERRUPT] = "Video CS MI USER INTERRUPT",
|
||||
[VCS_MMIO_SYNC_FLUSH] = "Video MMIO sync flush status",
|
||||
[VCS_CMD_STREAMER_ERR] = "Video CS error interrupt",
|
||||
[VCS_MI_FLUSH_DW] = "Video MI FLUSH DW notify",
|
||||
[VCS_WATCHDOG_EXCEEDED] = "Video CS Watchdog counter exceeded",
|
||||
[VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults",
|
||||
[VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt",
|
||||
[VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT",
|
||||
[VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify",
|
||||
[VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt",
|
||||
|
||||
[BCS_MI_USER_INTERRUPT] = "Blitter CS MI USER INTERRUPT",
|
||||
[BCS_MMIO_SYNC_FLUSH] = "Billter MMIO sync flush status",
|
||||
[BCS_CMD_STREAMER_ERR] = "Blitter CS error interrupt",
|
||||
[BCS_MI_FLUSH_DW] = "Blitter MI FLUSH DW notify",
|
||||
[BCS_PAGE_DIRECTORY_FAULT] = "Blitter page directory faults",
|
||||
[BCS_AS_CONTEXT_SWITCH] = "Blitter AS Context Switch Interrupt",
|
||||
|
||||
[VECS_MI_FLUSH_DW] = "Video Enhanced Streamer MI FLUSH DW notify",
|
||||
[VECS_AS_CONTEXT_SWITCH] = "VECS Context Switch Interrupt",
|
||||
|
||||
[PIPE_A_FIFO_UNDERRUN] = "Pipe A FIFO underrun",
|
||||
[PIPE_A_CRC_ERR] = "Pipe A CRC error",
|
||||
[PIPE_A_CRC_DONE] = "Pipe A CRC done",
|
||||
[PIPE_A_VSYNC] = "Pipe A vsync",
|
||||
[PIPE_A_LINE_COMPARE] = "Pipe A line compare",
|
||||
[PIPE_A_ODD_FIELD] = "Pipe A odd field",
|
||||
[PIPE_A_EVEN_FIELD] = "Pipe A even field",
|
||||
[PIPE_A_VBLANK] = "Pipe A vblank",
|
||||
[PIPE_B_FIFO_UNDERRUN] = "Pipe B FIFO underrun",
|
||||
[PIPE_B_CRC_ERR] = "Pipe B CRC error",
|
||||
[PIPE_B_CRC_DONE] = "Pipe B CRC done",
|
||||
[PIPE_B_VSYNC] = "Pipe B vsync",
|
||||
[PIPE_B_LINE_COMPARE] = "Pipe B line compare",
|
||||
[PIPE_B_ODD_FIELD] = "Pipe B odd field",
|
||||
[PIPE_B_EVEN_FIELD] = "Pipe B even field",
|
||||
[PIPE_B_VBLANK] = "Pipe B vblank",
|
||||
[PIPE_C_VBLANK] = "Pipe C vblank",
|
||||
[DPST_PHASE_IN] = "DPST phase in event",
|
||||
[DPST_HISTOGRAM] = "DPST histogram event",
|
||||
[GSE] = "GSE",
|
||||
[DP_A_HOTPLUG] = "DP A Hotplug",
|
||||
[AUX_CHANNEL_A] = "AUX Channel A",
|
||||
[PERF_COUNTER] = "Performance counter",
|
||||
[POISON] = "Poison",
|
||||
[GTT_FAULT] = "GTT fault",
|
||||
[PRIMARY_A_FLIP_DONE] = "Primary Plane A flip done",
|
||||
[PRIMARY_B_FLIP_DONE] = "Primary Plane B flip done",
|
||||
[PRIMARY_C_FLIP_DONE] = "Primary Plane C flip done",
|
||||
[SPRITE_A_FLIP_DONE] = "Sprite Plane A flip done",
|
||||
[SPRITE_B_FLIP_DONE] = "Sprite Plane B flip done",
|
||||
[SPRITE_C_FLIP_DONE] = "Sprite Plane C flip done",
|
||||
|
||||
[PCU_THERMAL] = "PCU Thermal Event",
|
||||
[PCU_PCODE2DRIVER_MAILBOX] = "PCU pcode2driver mailbox event",
|
||||
|
||||
[FDI_RX_INTERRUPTS_TRANSCODER_A] = "FDI RX Interrupts Combined A",
|
||||
[AUDIO_CP_CHANGE_TRANSCODER_A] = "Audio CP Change Transcoder A",
|
||||
[AUDIO_CP_REQUEST_TRANSCODER_A] = "Audio CP Request Transcoder A",
|
||||
[FDI_RX_INTERRUPTS_TRANSCODER_B] = "FDI RX Interrupts Combined B",
|
||||
[AUDIO_CP_CHANGE_TRANSCODER_B] = "Audio CP Change Transcoder B",
|
||||
[AUDIO_CP_REQUEST_TRANSCODER_B] = "Audio CP Request Transcoder B",
|
||||
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
|
||||
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
|
||||
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
|
||||
[ERR_AND_DBG] = "South Error and Debug Interupts Combined",
|
||||
[GMBUS] = "Gmbus",
|
||||
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
|
||||
[CRT_HOTPLUG] = "CRT Hotplug",
|
||||
[DP_B_HOTPLUG] = "DisplayPort/HDMI/DVI B Hotplug",
|
||||
[DP_C_HOTPLUG] = "DisplayPort/HDMI/DVI C Hotplug",
|
||||
[DP_D_HOTPLUG] = "DisplayPort/HDMI/DVI D Hotplug",
|
||||
[AUX_CHANNEL_B] = "AUX Channel B",
|
||||
[AUX_CHANNEL_C] = "AUX Channel C",
|
||||
[AUX_CHANNEL_D] = "AUX Channel D",
|
||||
[AUDIO_POWER_STATE_CHANGE_B] = "Audio Power State change Port B",
|
||||
[AUDIO_POWER_STATE_CHANGE_C] = "Audio Power State change Port C",
|
||||
[AUDIO_POWER_STATE_CHANGE_D] = "Audio Power State change Port D",
|
||||
|
||||
[INTEL_GVT_EVENT_RESERVED] = "RESERVED EVENTS!!!",
|
||||
};
|
||||
|
||||
static inline struct intel_gvt_irq_info *regbase_to_irq_info(
|
||||
struct intel_gvt *gvt,
|
||||
unsigned int reg)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
int i;
|
||||
|
||||
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
|
||||
if (i915_mmio_reg_offset(irq->info[i]->reg_base) == reg)
|
||||
return irq->info[i];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reg_imr_handler - Generic IMR register emulation write handler
|
||||
* @vgpu: a vGPU
|
||||
* @reg: register offset written by guest
|
||||
* @p_data: register data written by guest
|
||||
* @bytes: register data length
|
||||
*
|
||||
* This function is used to emulate the generic IMR register bit change
|
||||
* behavior.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
|
||||
u32 changed, masked, unmasked;
|
||||
u32 imr = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IMR %x with val %x\n",
|
||||
reg, imr);
|
||||
|
||||
gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
|
||||
|
||||
/* figure out newly masked/unmasked bits */
|
||||
changed = vgpu_vreg(vgpu, reg) ^ imr;
|
||||
masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
|
||||
unmasked = masked ^ changed;
|
||||
|
||||
gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
|
||||
changed, masked, unmasked);
|
||||
|
||||
vgpu_vreg(vgpu, reg) = imr;
|
||||
|
||||
ops->check_pending_irq(vgpu);
|
||||
gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reg_master_irq_handler - master IRQ write emulation handler
|
||||
* @vgpu: a vGPU
|
||||
* @reg: register offset written by guest
|
||||
* @p_data: register data written by guest
|
||||
* @bytes: register data length
|
||||
*
|
||||
* This function is used to emulate the master IRQ register on gen8+.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
|
||||
u32 changed, enabled, disabled;
|
||||
u32 ier = *(u32 *)p_data;
|
||||
u32 virtual_ier = vgpu_vreg(vgpu, reg);
|
||||
|
||||
gvt_dbg_irq("write master irq reg %x with val %x\n",
|
||||
reg, ier);
|
||||
|
||||
gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
|
||||
|
||||
/*
|
||||
* GEN8_MASTER_IRQ is a special irq register,
|
||||
* only bit 31 is allowed to be modified
|
||||
* and treated as an IER bit.
|
||||
*/
|
||||
ier &= GEN8_MASTER_IRQ_CONTROL;
|
||||
virtual_ier &= GEN8_MASTER_IRQ_CONTROL;
|
||||
vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
|
||||
vgpu_vreg(vgpu, reg) |= ier;
|
||||
|
||||
/* figure out newly enabled/disable bits */
|
||||
changed = virtual_ier ^ ier;
|
||||
enabled = (virtual_ier & changed) ^ changed;
|
||||
disabled = enabled ^ changed;
|
||||
|
||||
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
|
||||
changed, enabled, disabled);
|
||||
|
||||
ops->check_pending_irq(vgpu);
|
||||
gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reg_ier_handler - Generic IER write emulation handler
|
||||
* @vgpu: a vGPU
|
||||
* @reg: register offset written by guest
|
||||
* @p_data: register data written by guest
|
||||
* @bytes: register data length
|
||||
*
|
||||
* This function is used to emulate the generic IER register behavior.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
|
||||
struct intel_gvt_irq_info *info;
|
||||
u32 changed, enabled, disabled;
|
||||
u32 ier = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IER %x with val %x\n",
|
||||
reg, ier);
|
||||
|
||||
gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
|
||||
|
||||
/* figure out newly enabled/disable bits */
|
||||
changed = vgpu_vreg(vgpu, reg) ^ ier;
|
||||
enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
|
||||
disabled = enabled ^ changed;
|
||||
|
||||
gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
|
||||
changed, enabled, disabled);
|
||||
vgpu_vreg(vgpu, reg) = ier;
|
||||
|
||||
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
|
||||
if (WARN_ON(!info))
|
||||
return -EINVAL;
|
||||
|
||||
if (info->has_upstream_irq)
|
||||
update_upstream_irq(vgpu, info);
|
||||
|
||||
ops->check_pending_irq(vgpu);
|
||||
gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_reg_iir_handler - Generic IIR write emulation handler
|
||||
* @vgpu: a vGPU
|
||||
* @reg: register offset written by guest
|
||||
* @p_data: register data written by guest
|
||||
* @bytes: register data length
|
||||
*
|
||||
* This function is used to emulate the generic IIR register behavior.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
|
||||
iir_to_regbase(reg));
|
||||
u32 iir = *(u32 *)p_data;
|
||||
|
||||
gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
|
||||
|
||||
if (WARN_ON(!info))
|
||||
return -EINVAL;
|
||||
|
||||
vgpu_vreg(vgpu, reg) &= ~iir;
|
||||
|
||||
if (info->has_upstream_irq)
|
||||
update_upstream_irq(vgpu, info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct intel_gvt_irq_map gen8_irq_map[] = {
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 0, INTEL_GVT_IRQ_INFO_GT0, 0xffff },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 1, INTEL_GVT_IRQ_INFO_GT0, 0xffff0000 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 2, INTEL_GVT_IRQ_INFO_GT1, 0xffff },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 3, INTEL_GVT_IRQ_INFO_GT1, 0xffff0000 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 4, INTEL_GVT_IRQ_INFO_GT2, 0xffff },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 6, INTEL_GVT_IRQ_INFO_GT3, 0xffff },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 16, INTEL_GVT_IRQ_INFO_DE_PIPE_A, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 17, INTEL_GVT_IRQ_INFO_DE_PIPE_B, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 18, INTEL_GVT_IRQ_INFO_DE_PIPE_C, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 20, INTEL_GVT_IRQ_INFO_DE_PORT, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 22, INTEL_GVT_IRQ_INFO_DE_MISC, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 23, INTEL_GVT_IRQ_INFO_PCH, ~0 },
|
||||
{ INTEL_GVT_IRQ_INFO_MASTER, 30, INTEL_GVT_IRQ_INFO_PCU, ~0 },
|
||||
{ -1, -1, ~0 },
|
||||
};
|
||||
|
||||
static void update_upstream_irq(struct intel_vgpu *vgpu,
|
||||
struct intel_gvt_irq_info *info)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
|
||||
struct intel_gvt_irq_map *map = irq->irq_map;
|
||||
struct intel_gvt_irq_info *up_irq_info = NULL;
|
||||
u32 set_bits = 0;
|
||||
u32 clear_bits = 0;
|
||||
int bit;
|
||||
u32 val = vgpu_vreg(vgpu,
|
||||
regbase_to_iir(i915_mmio_reg_offset(info->reg_base)))
|
||||
& vgpu_vreg(vgpu,
|
||||
regbase_to_ier(i915_mmio_reg_offset(info->reg_base)));
|
||||
|
||||
if (!info->has_upstream_irq)
|
||||
return;
|
||||
|
||||
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
|
||||
if (info->group != map->down_irq_group)
|
||||
continue;
|
||||
|
||||
if (!up_irq_info)
|
||||
up_irq_info = irq->info[map->up_irq_group];
|
||||
else
|
||||
WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
|
||||
|
||||
bit = map->up_irq_bit;
|
||||
|
||||
if (val & map->down_irq_bitmask)
|
||||
set_bits |= (1 << bit);
|
||||
else
|
||||
clear_bits |= (1 << bit);
|
||||
}
|
||||
|
||||
WARN_ON(!up_irq_info);
|
||||
|
||||
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
|
||||
u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
|
||||
|
||||
vgpu_vreg(vgpu, isr) &= ~clear_bits;
|
||||
vgpu_vreg(vgpu, isr) |= set_bits;
|
||||
} else {
|
||||
u32 iir = regbase_to_iir(
|
||||
i915_mmio_reg_offset(up_irq_info->reg_base));
|
||||
u32 imr = regbase_to_imr(
|
||||
i915_mmio_reg_offset(up_irq_info->reg_base));
|
||||
|
||||
vgpu_vreg(vgpu, iir) |= (set_bits & ~vgpu_vreg(vgpu, imr));
|
||||
}
|
||||
|
||||
if (up_irq_info->has_upstream_irq)
|
||||
update_upstream_irq(vgpu, up_irq_info);
|
||||
}
|
||||
|
||||
static void init_irq_map(struct intel_gvt_irq *irq)
|
||||
{
|
||||
struct intel_gvt_irq_map *map;
|
||||
struct intel_gvt_irq_info *up_info, *down_info;
|
||||
int up_bit;
|
||||
|
||||
for (map = irq->irq_map; map->up_irq_bit != -1; map++) {
|
||||
up_info = irq->info[map->up_irq_group];
|
||||
up_bit = map->up_irq_bit;
|
||||
down_info = irq->info[map->down_irq_group];
|
||||
|
||||
set_bit(up_bit, up_info->downstream_irq_bitmap);
|
||||
down_info->has_upstream_irq = true;
|
||||
|
||||
gvt_dbg_irq("[up] grp %d bit %d -> [down] grp %d bitmask %x\n",
|
||||
up_info->group, up_bit,
|
||||
down_info->group, map->down_irq_bitmask);
|
||||
}
|
||||
}
|
||||
|
||||
/* =======================vEvent injection===================== */
|
||||
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
|
||||
{
|
||||
return intel_gvt_hypervisor_inject_msi(vgpu);
|
||||
}
|
||||
|
||||
static void propagate_event(struct intel_gvt_irq *irq,
|
||||
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt_irq_info *info;
|
||||
unsigned int reg_base;
|
||||
int bit;
|
||||
|
||||
info = get_irq_info(irq, event);
|
||||
if (WARN_ON(!info))
|
||||
return;
|
||||
|
||||
reg_base = i915_mmio_reg_offset(info->reg_base);
|
||||
bit = irq->events[event].bit;
|
||||
|
||||
if (!test_bit(bit, (void *)&vgpu_vreg(vgpu,
|
||||
regbase_to_imr(reg_base)))) {
|
||||
gvt_dbg_irq("set bit (%d) for (%s) for vgpu (%d)\n",
|
||||
bit, irq_name[event], vgpu->id);
|
||||
set_bit(bit, (void *)&vgpu_vreg(vgpu,
|
||||
regbase_to_iir(reg_base)));
|
||||
}
|
||||
}
|
||||
|
||||
/* =======================vEvent Handlers===================== */
|
||||
static void handle_default_event_virt(struct intel_gvt_irq *irq,
|
||||
enum intel_gvt_event_type event, struct intel_vgpu *vgpu)
|
||||
{
|
||||
if (!vgpu->irq.irq_warn_once[event]) {
|
||||
gvt_dbg_core("vgpu%d: IRQ receive event %d (%s)\n",
|
||||
vgpu->id, event, irq_name[event]);
|
||||
vgpu->irq.irq_warn_once[event] = true;
|
||||
}
|
||||
propagate_event(irq, event, vgpu);
|
||||
}
|
||||
|
||||
/* =====================GEN specific logic======================= */
|
||||
/* GEN8 interrupt routines. */
|
||||
|
||||
#define DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(regname, regbase) \
|
||||
static struct intel_gvt_irq_info gen8_##regname##_info = { \
|
||||
.name = #regname"-IRQ", \
|
||||
.reg_base = (regbase), \
|
||||
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] = \
|
||||
INTEL_GVT_EVENT_RESERVED}, \
|
||||
}
|
||||
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt0, GEN8_GT_ISR(0));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt1, GEN8_GT_ISR(1));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt2, GEN8_GT_ISR(2));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(gt3, GEN8_GT_ISR(3));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_a, GEN8_DE_PIPE_ISR(PIPE_A));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_b, GEN8_DE_PIPE_ISR(PIPE_B));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_pipe_c, GEN8_DE_PIPE_ISR(PIPE_C));
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_port, GEN8_DE_PORT_ISR);
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(de_misc, GEN8_DE_MISC_ISR);
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(pcu, GEN8_PCU_ISR);
|
||||
DEFINE_GVT_GEN8_INTEL_GVT_IRQ_INFO(master, GEN8_MASTER_IRQ);
|
||||
|
||||
static struct intel_gvt_irq_info gvt_base_pch_info = {
|
||||
.name = "PCH-IRQ",
|
||||
.reg_base = SDEISR,
|
||||
.bit_to_event = {[0 ... INTEL_GVT_IRQ_BITWIDTH-1] =
|
||||
INTEL_GVT_EVENT_RESERVED},
|
||||
};
|
||||
|
||||
static void gen8_check_pending_irq(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
|
||||
int i;
|
||||
|
||||
if (!(vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ)) &
|
||||
GEN8_MASTER_IRQ_CONTROL))
|
||||
return;
|
||||
|
||||
for_each_set_bit(i, irq->irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX) {
|
||||
struct intel_gvt_irq_info *info = irq->info[i];
|
||||
u32 reg_base;
|
||||
|
||||
if (!info->has_upstream_irq)
|
||||
continue;
|
||||
|
||||
reg_base = i915_mmio_reg_offset(info->reg_base);
|
||||
if ((vgpu_vreg(vgpu, regbase_to_iir(reg_base))
|
||||
& vgpu_vreg(vgpu, regbase_to_ier(reg_base))))
|
||||
update_upstream_irq(vgpu, info);
|
||||
}
|
||||
|
||||
if (vgpu_vreg(vgpu, i915_mmio_reg_offset(GEN8_MASTER_IRQ))
|
||||
& ~GEN8_MASTER_IRQ_CONTROL)
|
||||
inject_virtual_interrupt(vgpu);
|
||||
}
|
||||
|
||||
static void gen8_init_irq(
|
||||
struct intel_gvt_irq *irq)
|
||||
{
|
||||
struct intel_gvt *gvt = irq_to_gvt(irq);
|
||||
|
||||
#define SET_BIT_INFO(s, b, e, i) \
|
||||
do { \
|
||||
s->events[e].bit = b; \
|
||||
s->events[e].info = s->info[i]; \
|
||||
s->info[i]->bit_to_event[b] = e;\
|
||||
} while (0)
|
||||
|
||||
#define SET_IRQ_GROUP(s, g, i) \
|
||||
do { \
|
||||
s->info[g] = i; \
|
||||
(i)->group = g; \
|
||||
set_bit(g, s->irq_info_bitmap); \
|
||||
} while (0)
|
||||
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_MASTER, &gen8_master_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT0, &gen8_gt0_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT1, &gen8_gt1_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT2, &gen8_gt2_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_GT3, &gen8_gt3_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_A, &gen8_de_pipe_a_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_B, &gen8_de_pipe_b_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PIPE_C, &gen8_de_pipe_c_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_PORT, &gen8_de_port_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_DE_MISC, &gen8_de_misc_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCU, &gen8_pcu_info);
|
||||
SET_IRQ_GROUP(irq, INTEL_GVT_IRQ_INFO_PCH, &gvt_base_pch_info);
|
||||
|
||||
/* GEN8 level 2 interrupts. */
|
||||
|
||||
/* GEN8 interrupt GT0 events */
|
||||
SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
|
||||
SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0);
|
||||
SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
|
||||
|
||||
SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0);
|
||||
SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0);
|
||||
SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0);
|
||||
|
||||
/* GEN8 interrupt GT1 events */
|
||||
SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1);
|
||||
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
|
||||
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
|
||||
|
||||
if (HAS_BSD2(gvt->dev_priv)) {
|
||||
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
|
||||
INTEL_GVT_IRQ_INFO_GT1);
|
||||
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
|
||||
INTEL_GVT_IRQ_INFO_GT1);
|
||||
SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH,
|
||||
INTEL_GVT_IRQ_INFO_GT1);
|
||||
}
|
||||
|
||||
/* GEN8 interrupt GT3 events */
|
||||
SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3);
|
||||
SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3);
|
||||
SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3);
|
||||
|
||||
SET_BIT_INFO(irq, 0, PIPE_A_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
|
||||
SET_BIT_INFO(irq, 0, PIPE_B_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
|
||||
SET_BIT_INFO(irq, 0, PIPE_C_VBLANK, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
|
||||
|
||||
/* GEN8 interrupt DE PORT events */
|
||||
SET_BIT_INFO(irq, 0, AUX_CHANNEL_A, INTEL_GVT_IRQ_INFO_DE_PORT);
|
||||
SET_BIT_INFO(irq, 3, DP_A_HOTPLUG, INTEL_GVT_IRQ_INFO_DE_PORT);
|
||||
|
||||
/* GEN8 interrupt DE MISC events */
|
||||
SET_BIT_INFO(irq, 0, GSE, INTEL_GVT_IRQ_INFO_DE_MISC);
|
||||
|
||||
/* PCH events */
|
||||
SET_BIT_INFO(irq, 17, GMBUS, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 19, CRT_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 21, DP_B_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv)) {
|
||||
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
|
||||
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
|
||||
|
||||
SET_BIT_INFO(irq, 4, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
|
||||
SET_BIT_INFO(irq, 5, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
|
||||
|
||||
SET_BIT_INFO(irq, 4, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
|
||||
SET_BIT_INFO(irq, 5, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
|
||||
|
||||
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
|
||||
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
|
||||
} else if (IS_SKYLAKE(gvt->dev_priv)) {
|
||||
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
|
||||
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
|
||||
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
|
||||
|
||||
SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
|
||||
SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
|
||||
SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
|
||||
}
|
||||
|
||||
/* GEN8 interrupt PCU events */
|
||||
SET_BIT_INFO(irq, 24, PCU_THERMAL, INTEL_GVT_IRQ_INFO_PCU);
|
||||
SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU);
|
||||
}
|
||||
|
||||
static struct intel_gvt_irq_ops gen8_irq_ops = {
|
||||
.init_irq = gen8_init_irq,
|
||||
.check_pending_irq = gen8_check_pending_irq,
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_vgpu_trigger_virtual_event - Trigger a virtual event for a vGPU
|
||||
* @vgpu: a vGPU
|
||||
* @event: interrupt event
|
||||
*
|
||||
* This function is used to trigger a virtual interrupt event for vGPU.
|
||||
* The caller provides the event to be triggered, the framework itself
|
||||
* will emulate the IRQ register bit change.
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
|
||||
enum intel_gvt_event_type event)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
gvt_event_virt_handler_t handler;
|
||||
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
|
||||
|
||||
handler = get_event_virt_handler(irq, event);
|
||||
WARN_ON(!handler);
|
||||
|
||||
handler(irq, event, vgpu);
|
||||
|
||||
ops->check_pending_irq(vgpu);
|
||||
}
|
||||
|
||||
static void init_events(
|
||||
struct intel_gvt_irq *irq)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < INTEL_GVT_EVENT_MAX; i++) {
|
||||
irq->events[i].info = NULL;
|
||||
irq->events[i].v_handler = handle_default_event_virt;
|
||||
}
|
||||
}
|
||||
|
||||
static enum hrtimer_restart vblank_timer_fn(struct hrtimer *data)
|
||||
{
|
||||
struct intel_gvt_vblank_timer *vblank_timer;
|
||||
struct intel_gvt_irq *irq;
|
||||
struct intel_gvt *gvt;
|
||||
|
||||
vblank_timer = container_of(data, struct intel_gvt_vblank_timer, timer);
|
||||
irq = container_of(vblank_timer, struct intel_gvt_irq, vblank_timer);
|
||||
gvt = container_of(irq, struct intel_gvt, irq);
|
||||
|
||||
intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
|
||||
hrtimer_add_expires_ns(&vblank_timer->timer, vblank_timer->period);
|
||||
return HRTIMER_RESTART;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_clean_irq - clean up GVT-g IRQ emulation subsystem
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is called at driver unloading stage, to clean up GVT-g IRQ
|
||||
* emulation subsystem.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_clean_irq(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
|
||||
hrtimer_cancel(&irq->vblank_timer.timer);
|
||||
}
|
||||
|
||||
#define VBLNAK_TIMER_PERIOD 16000000
|
||||
|
||||
/**
|
||||
* intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* This function is called at driver loading stage, to initialize the GVT-g IRQ
|
||||
* emulation subsystem.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_gvt_init_irq(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_irq *irq = &gvt->irq;
|
||||
struct intel_gvt_vblank_timer *vblank_timer = &irq->vblank_timer;
|
||||
|
||||
gvt_dbg_core("init irq framework\n");
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
|
||||
irq->ops = &gen8_irq_ops;
|
||||
irq->irq_map = gen8_irq_map;
|
||||
} else {
|
||||
WARN_ON(1);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* common event initialization */
|
||||
init_events(irq);
|
||||
|
||||
/* gen specific initialization */
|
||||
irq->ops->init_irq(irq);
|
||||
|
||||
init_irq_map(irq);
|
||||
|
||||
hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
vblank_timer->timer.function = vblank_timer_fn;
|
||||
vblank_timer->period = VBLNAK_TIMER_PERIOD;
|
||||
|
||||
return 0;
|
||||
}
|
||||
233
drivers/gpu/drm/i915/gvt/interrupt.h
Normal file
233
drivers/gpu/drm/i915/gvt/interrupt.h
Normal file
@@ -0,0 +1,233 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min he <min.he@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_INTERRUPT_H_
|
||||
#define _GVT_INTERRUPT_H_
|
||||
|
||||
enum intel_gvt_event_type {
|
||||
RCS_MI_USER_INTERRUPT = 0,
|
||||
RCS_DEBUG,
|
||||
RCS_MMIO_SYNC_FLUSH,
|
||||
RCS_CMD_STREAMER_ERR,
|
||||
RCS_PIPE_CONTROL,
|
||||
RCS_L3_PARITY_ERR,
|
||||
RCS_WATCHDOG_EXCEEDED,
|
||||
RCS_PAGE_DIRECTORY_FAULT,
|
||||
RCS_AS_CONTEXT_SWITCH,
|
||||
RCS_MONITOR_BUFF_HALF_FULL,
|
||||
|
||||
VCS_MI_USER_INTERRUPT,
|
||||
VCS_MMIO_SYNC_FLUSH,
|
||||
VCS_CMD_STREAMER_ERR,
|
||||
VCS_MI_FLUSH_DW,
|
||||
VCS_WATCHDOG_EXCEEDED,
|
||||
VCS_PAGE_DIRECTORY_FAULT,
|
||||
VCS_AS_CONTEXT_SWITCH,
|
||||
|
||||
VCS2_MI_USER_INTERRUPT,
|
||||
VCS2_MI_FLUSH_DW,
|
||||
VCS2_AS_CONTEXT_SWITCH,
|
||||
|
||||
BCS_MI_USER_INTERRUPT,
|
||||
BCS_MMIO_SYNC_FLUSH,
|
||||
BCS_CMD_STREAMER_ERR,
|
||||
BCS_MI_FLUSH_DW,
|
||||
BCS_PAGE_DIRECTORY_FAULT,
|
||||
BCS_AS_CONTEXT_SWITCH,
|
||||
|
||||
VECS_MI_USER_INTERRUPT,
|
||||
VECS_MI_FLUSH_DW,
|
||||
VECS_AS_CONTEXT_SWITCH,
|
||||
|
||||
PIPE_A_FIFO_UNDERRUN,
|
||||
PIPE_B_FIFO_UNDERRUN,
|
||||
PIPE_A_CRC_ERR,
|
||||
PIPE_B_CRC_ERR,
|
||||
PIPE_A_CRC_DONE,
|
||||
PIPE_B_CRC_DONE,
|
||||
PIPE_A_ODD_FIELD,
|
||||
PIPE_B_ODD_FIELD,
|
||||
PIPE_A_EVEN_FIELD,
|
||||
PIPE_B_EVEN_FIELD,
|
||||
PIPE_A_LINE_COMPARE,
|
||||
PIPE_B_LINE_COMPARE,
|
||||
PIPE_C_LINE_COMPARE,
|
||||
PIPE_A_VBLANK,
|
||||
PIPE_B_VBLANK,
|
||||
PIPE_C_VBLANK,
|
||||
PIPE_A_VSYNC,
|
||||
PIPE_B_VSYNC,
|
||||
PIPE_C_VSYNC,
|
||||
PRIMARY_A_FLIP_DONE,
|
||||
PRIMARY_B_FLIP_DONE,
|
||||
PRIMARY_C_FLIP_DONE,
|
||||
SPRITE_A_FLIP_DONE,
|
||||
SPRITE_B_FLIP_DONE,
|
||||
SPRITE_C_FLIP_DONE,
|
||||
|
||||
PCU_THERMAL,
|
||||
PCU_PCODE2DRIVER_MAILBOX,
|
||||
|
||||
DPST_PHASE_IN,
|
||||
DPST_HISTOGRAM,
|
||||
GSE,
|
||||
DP_A_HOTPLUG,
|
||||
AUX_CHANNEL_A,
|
||||
PERF_COUNTER,
|
||||
POISON,
|
||||
GTT_FAULT,
|
||||
ERROR_INTERRUPT_COMBINED,
|
||||
|
||||
FDI_RX_INTERRUPTS_TRANSCODER_A,
|
||||
AUDIO_CP_CHANGE_TRANSCODER_A,
|
||||
AUDIO_CP_REQUEST_TRANSCODER_A,
|
||||
FDI_RX_INTERRUPTS_TRANSCODER_B,
|
||||
AUDIO_CP_CHANGE_TRANSCODER_B,
|
||||
AUDIO_CP_REQUEST_TRANSCODER_B,
|
||||
FDI_RX_INTERRUPTS_TRANSCODER_C,
|
||||
AUDIO_CP_CHANGE_TRANSCODER_C,
|
||||
AUDIO_CP_REQUEST_TRANSCODER_C,
|
||||
ERR_AND_DBG,
|
||||
GMBUS,
|
||||
SDVO_B_HOTPLUG,
|
||||
CRT_HOTPLUG,
|
||||
DP_B_HOTPLUG,
|
||||
DP_C_HOTPLUG,
|
||||
DP_D_HOTPLUG,
|
||||
AUX_CHANNEL_B,
|
||||
AUX_CHANNEL_C,
|
||||
AUX_CHANNEL_D,
|
||||
AUDIO_POWER_STATE_CHANGE_B,
|
||||
AUDIO_POWER_STATE_CHANGE_C,
|
||||
AUDIO_POWER_STATE_CHANGE_D,
|
||||
|
||||
INTEL_GVT_EVENT_RESERVED,
|
||||
INTEL_GVT_EVENT_MAX,
|
||||
};
|
||||
|
||||
struct intel_gvt_irq;
|
||||
struct intel_gvt;
|
||||
|
||||
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
|
||||
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
|
||||
|
||||
struct intel_gvt_irq_ops {
|
||||
void (*init_irq)(struct intel_gvt_irq *irq);
|
||||
void (*check_pending_irq)(struct intel_vgpu *vgpu);
|
||||
};
|
||||
|
||||
/* the list of physical interrupt control register groups */
|
||||
enum intel_gvt_irq_type {
|
||||
INTEL_GVT_IRQ_INFO_GT,
|
||||
INTEL_GVT_IRQ_INFO_DPY,
|
||||
INTEL_GVT_IRQ_INFO_PCH,
|
||||
INTEL_GVT_IRQ_INFO_PM,
|
||||
|
||||
INTEL_GVT_IRQ_INFO_MASTER,
|
||||
INTEL_GVT_IRQ_INFO_GT0,
|
||||
INTEL_GVT_IRQ_INFO_GT1,
|
||||
INTEL_GVT_IRQ_INFO_GT2,
|
||||
INTEL_GVT_IRQ_INFO_GT3,
|
||||
INTEL_GVT_IRQ_INFO_DE_PIPE_A,
|
||||
INTEL_GVT_IRQ_INFO_DE_PIPE_B,
|
||||
INTEL_GVT_IRQ_INFO_DE_PIPE_C,
|
||||
INTEL_GVT_IRQ_INFO_DE_PORT,
|
||||
INTEL_GVT_IRQ_INFO_DE_MISC,
|
||||
INTEL_GVT_IRQ_INFO_AUD,
|
||||
INTEL_GVT_IRQ_INFO_PCU,
|
||||
|
||||
INTEL_GVT_IRQ_INFO_MAX,
|
||||
};
|
||||
|
||||
#define INTEL_GVT_IRQ_BITWIDTH 32
|
||||
|
||||
/* device specific interrupt bit definitions */
|
||||
struct intel_gvt_irq_info {
|
||||
char *name;
|
||||
i915_reg_t reg_base;
|
||||
enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
|
||||
unsigned long warned;
|
||||
int group;
|
||||
DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
|
||||
bool has_upstream_irq;
|
||||
};
|
||||
|
||||
/* per-event information */
|
||||
struct intel_gvt_event_info {
|
||||
int bit; /* map to register bit */
|
||||
int policy; /* forwarding policy */
|
||||
struct intel_gvt_irq_info *info; /* register info */
|
||||
gvt_event_virt_handler_t v_handler; /* for v_event */
|
||||
};
|
||||
|
||||
struct intel_gvt_irq_map {
|
||||
int up_irq_group;
|
||||
int up_irq_bit;
|
||||
int down_irq_group;
|
||||
u32 down_irq_bitmask;
|
||||
};
|
||||
|
||||
struct intel_gvt_vblank_timer {
|
||||
struct hrtimer timer;
|
||||
u64 period;
|
||||
};
|
||||
|
||||
/* structure containing device specific IRQ state */
|
||||
struct intel_gvt_irq {
|
||||
struct intel_gvt_irq_ops *ops;
|
||||
struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
|
||||
DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
|
||||
struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
|
||||
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
|
||||
struct intel_gvt_irq_map *irq_map;
|
||||
struct intel_gvt_vblank_timer vblank_timer;
|
||||
};
|
||||
|
||||
int intel_gvt_init_irq(struct intel_gvt *gvt);
|
||||
void intel_gvt_clean_irq(struct intel_gvt *gvt);
|
||||
|
||||
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
|
||||
enum intel_gvt_event_type event);
|
||||
|
||||
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
|
||||
void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
|
||||
unsigned int reg, void *p_data, unsigned int bytes);
|
||||
|
||||
int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
|
||||
int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
|
||||
int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
|
||||
|
||||
#endif /* _GVT_INTERRUPT_H_ */
|
||||
305
drivers/gpu/drm/i915/gvt/mmio.c
Normal file
305
drivers/gpu/drm/i915/gvt/mmio.c
Normal file
@@ -0,0 +1,305 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Dexuan Cui
|
||||
*
|
||||
* Contributors:
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Niu Bing <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
/**
|
||||
* intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed
|
||||
*/
|
||||
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
|
||||
{
|
||||
u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
|
||||
~GENMASK(3, 0);
|
||||
return gpa - gttmmio_gpa;
|
||||
}
|
||||
|
||||
#define reg_is_mmio(gvt, reg) \
|
||||
(reg >= 0 && reg < gvt->device_info.mmio_size)
|
||||
|
||||
#define reg_is_gtt(gvt, reg) \
|
||||
(reg >= gvt->device_info.gtt_start_offset \
|
||||
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_mmio_read - emulate MMIO read
|
||||
* @vgpu: a vGPU
|
||||
* @pa: guest physical address
|
||||
* @p_data: data return buffer
|
||||
* @bytes: access data length
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed
|
||||
*/
|
||||
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_vgpu *vgpu = __vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_mmio_info *mmio;
|
||||
unsigned int offset = 0;
|
||||
int ret = -EINVAL;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
|
||||
struct intel_vgpu_guest_page *gp;
|
||||
|
||||
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
|
||||
if (gp) {
|
||||
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
|
||||
p_data, bytes);
|
||||
if (ret) {
|
||||
gvt_err("vgpu%d: guest page read error %d, "
|
||||
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
|
||||
vgpu->id, ret,
|
||||
gp->gfn, pa, *(u32 *)p_data, bytes);
|
||||
}
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
|
||||
|
||||
if (WARN_ON(bytes > 8))
|
||||
goto err;
|
||||
|
||||
if (reg_is_gtt(gvt, offset)) {
|
||||
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
|
||||
goto err;
|
||||
if (WARN_ON(bytes != 4 && bytes != 8))
|
||||
goto err;
|
||||
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
|
||||
goto err;
|
||||
|
||||
ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
|
||||
p_data, bytes);
|
||||
if (ret)
|
||||
goto err;
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
|
||||
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
|
||||
goto err;
|
||||
|
||||
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
|
||||
if (!mmio && !vgpu->mmio.disable_warn_untrack) {
|
||||
gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
|
||||
vgpu->id, offset, bytes, *(u32 *)p_data);
|
||||
|
||||
if (offset == 0x206c) {
|
||||
gvt_err("------------------------------------------\n");
|
||||
gvt_err("vgpu%d: likely triggers a gfx reset\n",
|
||||
vgpu->id);
|
||||
gvt_err("------------------------------------------\n");
|
||||
vgpu->mmio.disable_warn_untrack = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
|
||||
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (mmio) {
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
|
||||
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
|
||||
goto err;
|
||||
if (WARN_ON(mmio->offset != offset))
|
||||
goto err;
|
||||
}
|
||||
ret = mmio->read(vgpu, offset, p_data, bytes);
|
||||
} else
|
||||
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
|
||||
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
intel_gvt_mmio_set_accessed(gvt, offset);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return 0;
|
||||
err:
|
||||
gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
|
||||
vgpu->id, offset, bytes);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_mmio_write - emulate MMIO write
|
||||
* @vgpu: a vGPU
|
||||
* @pa: guest physical address
|
||||
* @p_data: write data buffer
|
||||
* @bytes: access data length
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed
|
||||
*/
|
||||
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
|
||||
void *p_data, unsigned int bytes)
|
||||
{
|
||||
struct intel_vgpu *vgpu = __vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_mmio_info *mmio;
|
||||
unsigned int offset = 0;
|
||||
u32 old_vreg = 0, old_sreg = 0;
|
||||
int ret = -EINVAL;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
|
||||
struct intel_vgpu_guest_page *gp;
|
||||
|
||||
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
|
||||
if (gp) {
|
||||
ret = gp->handler(gp, pa, p_data, bytes);
|
||||
if (ret) {
|
||||
gvt_err("vgpu%d: guest page write error %d, "
|
||||
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
|
||||
vgpu->id, ret,
|
||||
gp->gfn, pa, *(u32 *)p_data, bytes);
|
||||
}
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
|
||||
|
||||
if (WARN_ON(bytes > 8))
|
||||
goto err;
|
||||
|
||||
if (reg_is_gtt(gvt, offset)) {
|
||||
if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
|
||||
goto err;
|
||||
if (WARN_ON(bytes != 4 && bytes != 8))
|
||||
goto err;
|
||||
if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
|
||||
goto err;
|
||||
|
||||
ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
|
||||
p_data, bytes);
|
||||
if (ret)
|
||||
goto err;
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
|
||||
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
|
||||
if (!mmio && !vgpu->mmio.disable_warn_untrack)
|
||||
gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
|
||||
vgpu->id, offset, bytes, *(u32 *)p_data);
|
||||
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
|
||||
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (mmio) {
|
||||
u64 ro_mask = mmio->ro_mask;
|
||||
|
||||
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
|
||||
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
|
||||
goto err;
|
||||
if (WARN_ON(mmio->offset != offset))
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
|
||||
old_vreg = vgpu_vreg(vgpu, offset);
|
||||
old_sreg = vgpu_sreg(vgpu, offset);
|
||||
}
|
||||
|
||||
if (!ro_mask) {
|
||||
ret = mmio->write(vgpu, offset, p_data, bytes);
|
||||
} else {
|
||||
/* Protect RO bits like HW */
|
||||
u64 data = 0;
|
||||
|
||||
/* all register bits are RO. */
|
||||
if (ro_mask == ~(u64)0) {
|
||||
gvt_err("vgpu%d: try to write RO reg %x\n",
|
||||
vgpu->id, offset);
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
/* keep the RO bits in the virtual register */
|
||||
memcpy(&data, p_data, bytes);
|
||||
data &= ~mmio->ro_mask;
|
||||
data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
|
||||
ret = mmio->write(vgpu, offset, &data, bytes);
|
||||
}
|
||||
|
||||
/* higher 16bits of mode ctl regs are mask bits for change */
|
||||
if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
|
||||
u32 mask = vgpu_vreg(vgpu, offset) >> 16;
|
||||
|
||||
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
|
||||
| (vgpu_vreg(vgpu, offset) & mask);
|
||||
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
|
||||
| (vgpu_sreg(vgpu, offset) & mask);
|
||||
}
|
||||
} else
|
||||
ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
|
||||
bytes);
|
||||
if (ret)
|
||||
goto err;
|
||||
out:
|
||||
intel_gvt_mmio_set_accessed(gvt, offset);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return 0;
|
||||
err:
|
||||
gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
|
||||
vgpu->id, offset, bytes);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
105
drivers/gpu/drm/i915/gvt/mmio.h
Normal file
105
drivers/gpu/drm/i915/gvt/mmio.h
Normal file
@@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Ke Yu
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
* Dexuan Cui
|
||||
*
|
||||
* Contributors:
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Niu Bing <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_MMIO_H_
|
||||
#define _GVT_MMIO_H_
|
||||
|
||||
struct intel_gvt;
|
||||
struct intel_vgpu;
|
||||
|
||||
#define D_SNB (1 << 0)
|
||||
#define D_IVB (1 << 1)
|
||||
#define D_HSW (1 << 2)
|
||||
#define D_BDW (1 << 3)
|
||||
#define D_SKL (1 << 4)
|
||||
|
||||
#define D_GEN9PLUS (D_SKL)
|
||||
#define D_GEN8PLUS (D_BDW | D_SKL)
|
||||
#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
|
||||
#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
|
||||
|
||||
#define D_SKL_PLUS (D_SKL)
|
||||
#define D_BDW_PLUS (D_BDW | D_SKL)
|
||||
#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
|
||||
#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
|
||||
|
||||
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
|
||||
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
|
||||
#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
|
||||
|
||||
struct intel_gvt_mmio_info {
|
||||
u32 offset;
|
||||
u32 size;
|
||||
u32 length;
|
||||
u32 addr_mask;
|
||||
u64 ro_mask;
|
||||
u32 device;
|
||||
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int);
|
||||
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int);
|
||||
u32 addr_range;
|
||||
struct hlist_node node;
|
||||
};
|
||||
|
||||
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
|
||||
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
|
||||
|
||||
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
|
||||
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
|
||||
|
||||
struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
|
||||
typeof(reg) __reg = reg; \
|
||||
u32 *offset = (u32 *)&__reg; \
|
||||
*offset; \
|
||||
})
|
||||
|
||||
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
|
||||
int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
|
||||
unsigned int bytes);
|
||||
int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
|
||||
unsigned int bytes);
|
||||
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
|
||||
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
|
||||
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
|
||||
unsigned int offset);
|
||||
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
|
||||
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes);
|
||||
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|
||||
void *p_data, unsigned int bytes);
|
||||
#endif
|
||||
@@ -19,6 +19,15 @@
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Dexuan Cui
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_MPT_H_
|
||||
@@ -46,4 +55,215 @@ static inline int intel_gvt_hypervisor_detect_host(void)
|
||||
return intel_gvt_host.mpt->detect_host();
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
|
||||
* related stuffs inside hypervisor.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
|
||||
* related stuffs inside hypervisor.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
|
||||
}
|
||||
|
||||
#define MSI_CAP_CONTROL(offset) (offset + 2)
|
||||
#define MSI_CAP_ADDRESS(offset) (offset + 4)
|
||||
#define MSI_CAP_DATA(offset) (offset + 8)
|
||||
#define MSI_CAP_EN 0x1
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
|
||||
{
|
||||
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
|
||||
u16 control, data;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
|
||||
addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
|
||||
data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
|
||||
|
||||
/* Do not generate MSI if MSIEN is disable */
|
||||
if (!(control & MSI_CAP_EN))
|
||||
return 0;
|
||||
|
||||
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
|
||||
return -EINVAL;
|
||||
|
||||
gvt_dbg_irq("vgpu%d: inject msi address %x data%x\n", vgpu->id, addr,
|
||||
data);
|
||||
|
||||
ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
|
||||
if (ret)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
|
||||
* @p: host kernel virtual address
|
||||
*
|
||||
* Returns:
|
||||
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
|
||||
*/
|
||||
static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
|
||||
{
|
||||
return intel_gvt_host.mpt->from_virt_to_mfn(p);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
|
||||
* @vgpu: a vGPU
|
||||
* @p: intel_vgpu_guest_page
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *p)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (p->writeprotection)
|
||||
return 0;
|
||||
|
||||
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
|
||||
if (ret)
|
||||
return ret;
|
||||
p->writeprotection = true;
|
||||
atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
|
||||
* guest page
|
||||
* @vgpu: a vGPU
|
||||
* @p: intel_vgpu_guest_page
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_guest_page *p)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!p->writeprotection)
|
||||
return 0;
|
||||
|
||||
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
|
||||
if (ret)
|
||||
return ret;
|
||||
p->writeprotection = false;
|
||||
atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
|
||||
* @vgpu: a vGPU
|
||||
* @gpa: guest physical address
|
||||
* @buf: host data buffer
|
||||
* @len: data length
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
|
||||
unsigned long gpa, void *buf, unsigned long len)
|
||||
{
|
||||
return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
|
||||
* @vgpu: a vGPU
|
||||
* @gpa: guest physical address
|
||||
* @buf: host data buffer
|
||||
* @len: data length
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
|
||||
unsigned long gpa, void *buf, unsigned long len)
|
||||
{
|
||||
return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
|
||||
* @vgpu: a vGPU
|
||||
* @gpfn: guest pfn
|
||||
*
|
||||
* Returns:
|
||||
* MFN on success, INTEL_GVT_INVALID_ADDR if failed.
|
||||
*/
|
||||
static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
|
||||
struct intel_vgpu *vgpu, unsigned long gfn)
|
||||
{
|
||||
return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
|
||||
}
|
||||
|
||||
enum {
|
||||
GVT_MAP_APERTURE = 0,
|
||||
GVT_MAP_OPREGION,
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
|
||||
* @vgpu: a vGPU
|
||||
* @gfn: guest PFN
|
||||
* @mfn: host PFN
|
||||
* @nr: amount of PFNs
|
||||
* @map: map or unmap
|
||||
* @type: map type
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
|
||||
struct intel_vgpu *vgpu, unsigned long gfn,
|
||||
unsigned long mfn, unsigned int nr,
|
||||
bool map, int type)
|
||||
{
|
||||
return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
|
||||
map, type);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
|
||||
* @vgpu: a vGPU
|
||||
* @start: the beginning of the guest physical address region
|
||||
* @end: the end of the guest physical address region
|
||||
* @map: map or unmap
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_set_trap_area(
|
||||
struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
|
||||
{
|
||||
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
|
||||
}
|
||||
|
||||
#endif /* _GVT_MPT_H_ */
|
||||
|
||||
343
drivers/gpu/drm/i915/gvt/opregion.c
Normal file
343
drivers/gpu/drm/i915/gvt/opregion.c
Normal file
@@ -0,0 +1,343 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
|
||||
{
|
||||
void *host_va = vgpu->gvt->opregion.opregion_va;
|
||||
u8 *buf;
|
||||
int i;
|
||||
|
||||
if (WARN((vgpu_opregion(vgpu)->va),
|
||||
"vgpu%d: opregion has been initialized already.\n",
|
||||
vgpu->id))
|
||||
return -EINVAL;
|
||||
|
||||
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
|
||||
GFP_DMA32 | __GFP_ZERO,
|
||||
INTEL_GVT_OPREGION_PORDER);
|
||||
|
||||
if (!vgpu_opregion(vgpu)->va)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
|
||||
INTEL_GVT_OPREGION_SIZE);
|
||||
|
||||
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
|
||||
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
|
||||
|
||||
/* for unknown reason, the value in LID field is incorrect
|
||||
* which block the windows guest, so workaround it by force
|
||||
* setting it to "OPEN"
|
||||
*/
|
||||
buf = (u8 *)vgpu_opregion(vgpu)->va;
|
||||
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
|
||||
{
|
||||
u64 mfn;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
|
||||
mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
|
||||
+ i * PAGE_SIZE);
|
||||
if (mfn == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("fail to get MFN from VA\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
|
||||
vgpu_opregion(vgpu)->gfn[i],
|
||||
mfn, 1, map, GVT_MAP_OPREGION);
|
||||
if (ret) {
|
||||
gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
|
||||
* @vgpu: a vGPU
|
||||
*
|
||||
*/
|
||||
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
|
||||
{
|
||||
int i;
|
||||
|
||||
gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
|
||||
|
||||
if (!vgpu_opregion(vgpu)->va)
|
||||
return;
|
||||
|
||||
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
|
||||
vunmap(vgpu_opregion(vgpu)->va);
|
||||
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
|
||||
if (vgpu_opregion(vgpu)->pages[i]) {
|
||||
put_page(vgpu_opregion(vgpu)->pages[i]);
|
||||
vgpu_opregion(vgpu)->pages[i] = NULL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
map_vgpu_opregion(vgpu, false);
|
||||
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
|
||||
INTEL_GVT_OPREGION_PORDER);
|
||||
}
|
||||
|
||||
vgpu_opregion(vgpu)->va = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
|
||||
* @vgpu: a vGPU
|
||||
* @gpa: guest physical address of opregion
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
|
||||
{
|
||||
int ret;
|
||||
|
||||
gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
|
||||
|
||||
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
|
||||
gvt_dbg_core("emulate opregion from kernel\n");
|
||||
|
||||
ret = init_vgpu_opregion(vgpu, gpa);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = map_vgpu_opregion(vgpu, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
gvt_dbg_core("emulate opregion from userspace\n");
|
||||
|
||||
/*
|
||||
* If opregion pages are not allocated from host kenrel,
|
||||
* most of the params are meaningless
|
||||
*/
|
||||
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
|
||||
0, /* not used */
|
||||
0, /* not used */
|
||||
2, /* not used */
|
||||
1,
|
||||
GVT_MAP_OPREGION);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_clean_opregion - clean host opergion related stuffs
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
*/
|
||||
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
|
||||
{
|
||||
iounmap(gvt->opregion.opregion_va);
|
||||
gvt->opregion.opregion_va = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_init_opregion - initialize host opergion related stuffs
|
||||
* @gvt: a GVT device
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*/
|
||||
int intel_gvt_init_opregion(struct intel_gvt *gvt)
|
||||
{
|
||||
gvt_dbg_core("init host opregion\n");
|
||||
|
||||
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
|
||||
&gvt->opregion.opregion_pa);
|
||||
|
||||
gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
|
||||
INTEL_GVT_OPREGION_SIZE);
|
||||
if (!gvt->opregion.opregion_va) {
|
||||
gvt_err("fail to map host opregion\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define GVT_OPREGION_FUNC(scic) \
|
||||
({ \
|
||||
u32 __ret; \
|
||||
__ret = (scic & OPREGION_SCIC_FUNC_MASK) >> \
|
||||
OPREGION_SCIC_FUNC_SHIFT; \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define GVT_OPREGION_SUBFUNC(scic) \
|
||||
({ \
|
||||
u32 __ret; \
|
||||
__ret = (scic & OPREGION_SCIC_SUBFUNC_MASK) >> \
|
||||
OPREGION_SCIC_SUBFUNC_SHIFT; \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
static const char *opregion_func_name(u32 func)
|
||||
{
|
||||
const char *name = NULL;
|
||||
|
||||
switch (func) {
|
||||
case 0 ... 3:
|
||||
case 5:
|
||||
case 7 ... 15:
|
||||
name = "Reserved";
|
||||
break;
|
||||
|
||||
case 4:
|
||||
name = "Get BIOS Data";
|
||||
break;
|
||||
|
||||
case 6:
|
||||
name = "System BIOS Callbacks";
|
||||
break;
|
||||
|
||||
default:
|
||||
name = "Unknown";
|
||||
break;
|
||||
}
|
||||
return name;
|
||||
}
|
||||
|
||||
static const char *opregion_subfunc_name(u32 subfunc)
|
||||
{
|
||||
const char *name = NULL;
|
||||
|
||||
switch (subfunc) {
|
||||
case 0:
|
||||
name = "Supported Calls";
|
||||
break;
|
||||
|
||||
case 1:
|
||||
name = "Requested Callbacks";
|
||||
break;
|
||||
|
||||
case 2 ... 3:
|
||||
case 8 ... 9:
|
||||
name = "Reserved";
|
||||
break;
|
||||
|
||||
case 5:
|
||||
name = "Boot Display";
|
||||
break;
|
||||
|
||||
case 6:
|
||||
name = "TV-Standard/Video-Connector";
|
||||
break;
|
||||
|
||||
case 7:
|
||||
name = "Internal Graphics";
|
||||
break;
|
||||
|
||||
case 10:
|
||||
name = "Spread Spectrum Clocks";
|
||||
break;
|
||||
|
||||
case 11:
|
||||
name = "Get AKSV";
|
||||
break;
|
||||
|
||||
default:
|
||||
name = "Unknown";
|
||||
break;
|
||||
}
|
||||
return name;
|
||||
};
|
||||
|
||||
static bool querying_capabilities(u32 scic)
|
||||
{
|
||||
u32 func, subfunc;
|
||||
|
||||
func = GVT_OPREGION_FUNC(scic);
|
||||
subfunc = GVT_OPREGION_SUBFUNC(scic);
|
||||
|
||||
if ((func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
|
||||
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)
|
||||
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA &&
|
||||
subfunc == INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS)
|
||||
|| (func == INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS &&
|
||||
subfunc == INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_vgpu_emulate_opregion_request - emulating OpRegion request
|
||||
* @vgpu: a vGPU
|
||||
* @swsci: SWSCI request
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed
|
||||
*/
|
||||
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
|
||||
{
|
||||
u32 *scic, *parm;
|
||||
u32 func, subfunc;
|
||||
|
||||
scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
|
||||
parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
|
||||
|
||||
if (!(swsci & SWSCI_SCI_SELECT)) {
|
||||
gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
|
||||
return 0;
|
||||
}
|
||||
/* ignore non 0->1 trasitions */
|
||||
if ((vgpu_cfg_space(vgpu)[INTEL_GVT_PCI_SWSCI]
|
||||
& SWSCI_SCI_TRIGGER) ||
|
||||
!(swsci & SWSCI_SCI_TRIGGER)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
func = GVT_OPREGION_FUNC(*scic);
|
||||
subfunc = GVT_OPREGION_SUBFUNC(*scic);
|
||||
if (!querying_capabilities(*scic)) {
|
||||
gvt_err("vgpu%d: requesting runtime service: func \"%s\","
|
||||
" subfunc \"%s\"\n",
|
||||
vgpu->id,
|
||||
opregion_func_name(func),
|
||||
opregion_subfunc_name(subfunc));
|
||||
/*
|
||||
* emulate exit status of function call, '0' means
|
||||
* "failure, generic, unsupported or unknown cause"
|
||||
*/
|
||||
*scic &= ~OPREGION_SCIC_EXIT_MASK;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*scic = 0;
|
||||
*parm = 0;
|
||||
return 0;
|
||||
}
|
||||
80
drivers/gpu/drm/i915/gvt/reg.h
Normal file
80
drivers/gpu/drm/i915/gvt/reg.h
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _GVT_REG_H
|
||||
#define _GVT_REG_H
|
||||
|
||||
#define INTEL_GVT_PCI_CLASS_VGA_OTHER 0x80
|
||||
|
||||
#define INTEL_GVT_PCI_GMCH_CONTROL 0x50
|
||||
#define BDW_GMCH_GMS_SHIFT 8
|
||||
#define BDW_GMCH_GMS_MASK 0xff
|
||||
|
||||
#define INTEL_GVT_PCI_SWSCI 0xe8
|
||||
#define SWSCI_SCI_SELECT (1 << 15)
|
||||
#define SWSCI_SCI_TRIGGER 1
|
||||
|
||||
#define INTEL_GVT_PCI_OPREGION 0xfc
|
||||
|
||||
#define INTEL_GVT_OPREGION_CLID 0x1AC
|
||||
#define INTEL_GVT_OPREGION_SCIC 0x200
|
||||
#define OPREGION_SCIC_FUNC_MASK 0x1E
|
||||
#define OPREGION_SCIC_FUNC_SHIFT 1
|
||||
#define OPREGION_SCIC_SUBFUNC_MASK 0xFF00
|
||||
#define OPREGION_SCIC_SUBFUNC_SHIFT 8
|
||||
#define OPREGION_SCIC_EXIT_MASK 0xE0
|
||||
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSDATA 4
|
||||
#define INTEL_GVT_OPREGION_SCIC_F_GETBIOSCALLBACKS 6
|
||||
#define INTEL_GVT_OPREGION_SCIC_SF_SUPPRTEDCALLS 0
|
||||
#define INTEL_GVT_OPREGION_SCIC_SF_REQEUSTEDCALLBACKS 1
|
||||
#define INTEL_GVT_OPREGION_PARM 0x204
|
||||
|
||||
#define INTEL_GVT_OPREGION_PAGES 2
|
||||
#define INTEL_GVT_OPREGION_PORDER 1
|
||||
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
|
||||
|
||||
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
|
||||
|
||||
#define _REG_VECS_EXCC 0x1A028
|
||||
#define _REG_VCS2_EXCC 0x1c028
|
||||
|
||||
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
|
||||
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
|
||||
|
||||
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
|
||||
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
|
||||
|
||||
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
|
||||
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
|
||||
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
|
||||
#define FORCEWAKE_ACK_BLITTER_GEN9_REG 0x130044
|
||||
#define FORCEWAKE_MEDIA_GEN9_REG 0xa270
|
||||
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
|
||||
#define FORCEWAKE_ACK_HSW_REG 0x130044
|
||||
|
||||
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
|
||||
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
|
||||
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
|
||||
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
|
||||
|
||||
#endif
|
||||
290
drivers/gpu/drm/i915/gvt/render.c
Normal file
290
drivers/gpu/drm/i915/gvt/render.c
Normal file
@@ -0,0 +1,290 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
struct render_mmio {
|
||||
int ring_id;
|
||||
i915_reg_t reg;
|
||||
u32 mask;
|
||||
bool in_context;
|
||||
u32 value;
|
||||
};
|
||||
|
||||
static struct render_mmio gen8_render_mmio_list[] = {
|
||||
{RCS, _MMIO(0x229c), 0xffff, false},
|
||||
{RCS, _MMIO(0x2248), 0x0, false},
|
||||
{RCS, _MMIO(0x2098), 0x0, false},
|
||||
{RCS, _MMIO(0x20c0), 0xffff, true},
|
||||
{RCS, _MMIO(0x24d0), 0, false},
|
||||
{RCS, _MMIO(0x24d4), 0, false},
|
||||
{RCS, _MMIO(0x24d8), 0, false},
|
||||
{RCS, _MMIO(0x24dc), 0, false},
|
||||
{RCS, _MMIO(0x7004), 0xffff, true},
|
||||
{RCS, _MMIO(0x7008), 0xffff, true},
|
||||
{RCS, _MMIO(0x7000), 0xffff, true},
|
||||
{RCS, _MMIO(0x7010), 0xffff, true},
|
||||
{RCS, _MMIO(0x7300), 0xffff, true},
|
||||
{RCS, _MMIO(0x83a4), 0xffff, true},
|
||||
|
||||
{BCS, _MMIO(0x2229c), 0xffff, false},
|
||||
{BCS, _MMIO(0x2209c), 0xffff, false},
|
||||
{BCS, _MMIO(0x220c0), 0xffff, false},
|
||||
{BCS, _MMIO(0x22098), 0x0, false},
|
||||
{BCS, _MMIO(0x22028), 0x0, false},
|
||||
};
|
||||
|
||||
static struct render_mmio gen9_render_mmio_list[] = {
|
||||
{RCS, _MMIO(0x229c), 0xffff, false},
|
||||
{RCS, _MMIO(0x2248), 0x0, false},
|
||||
{RCS, _MMIO(0x2098), 0x0, false},
|
||||
{RCS, _MMIO(0x20c0), 0xffff, true},
|
||||
{RCS, _MMIO(0x24d0), 0, false},
|
||||
{RCS, _MMIO(0x24d4), 0, false},
|
||||
{RCS, _MMIO(0x24d8), 0, false},
|
||||
{RCS, _MMIO(0x24dc), 0, false},
|
||||
{RCS, _MMIO(0x7004), 0xffff, true},
|
||||
{RCS, _MMIO(0x7008), 0xffff, true},
|
||||
{RCS, _MMIO(0x7000), 0xffff, true},
|
||||
{RCS, _MMIO(0x7010), 0xffff, true},
|
||||
{RCS, _MMIO(0x7300), 0xffff, true},
|
||||
{RCS, _MMIO(0x83a4), 0xffff, true},
|
||||
|
||||
{RCS, _MMIO(0x40e0), 0, false},
|
||||
{RCS, _MMIO(0x40e4), 0, false},
|
||||
{RCS, _MMIO(0x2580), 0xffff, true},
|
||||
{RCS, _MMIO(0x7014), 0xffff, true},
|
||||
{RCS, _MMIO(0x20ec), 0xffff, false},
|
||||
{RCS, _MMIO(0xb118), 0, false},
|
||||
{RCS, _MMIO(0xe100), 0xffff, true},
|
||||
{RCS, _MMIO(0xe180), 0xffff, true},
|
||||
{RCS, _MMIO(0xe184), 0xffff, true},
|
||||
{RCS, _MMIO(0xe188), 0xffff, true},
|
||||
{RCS, _MMIO(0xe194), 0xffff, true},
|
||||
{RCS, _MMIO(0x4de0), 0, false},
|
||||
{RCS, _MMIO(0x4de4), 0, false},
|
||||
{RCS, _MMIO(0x4de8), 0, false},
|
||||
{RCS, _MMIO(0x4dec), 0, false},
|
||||
{RCS, _MMIO(0x4df0), 0, false},
|
||||
{RCS, _MMIO(0x4df4), 0, false},
|
||||
|
||||
{BCS, _MMIO(0x2229c), 0xffff, false},
|
||||
{BCS, _MMIO(0x2209c), 0xffff, false},
|
||||
{BCS, _MMIO(0x220c0), 0xffff, false},
|
||||
{BCS, _MMIO(0x22098), 0x0, false},
|
||||
{BCS, _MMIO(0x22028), 0x0, false},
|
||||
|
||||
{VCS2, _MMIO(0x1c028), 0xffff, false},
|
||||
|
||||
{VECS, _MMIO(0x1a028), 0xffff, false},
|
||||
};
|
||||
|
||||
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
|
||||
static u32 gen9_render_mocs_L3[32];
|
||||
|
||||
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
i915_reg_t reg;
|
||||
u32 regs[] = {
|
||||
[RCS] = 0x4260,
|
||||
[VCS] = 0x4264,
|
||||
[VCS2] = 0x4268,
|
||||
[BCS] = 0x426c,
|
||||
[VECS] = 0x4270,
|
||||
};
|
||||
|
||||
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
|
||||
return;
|
||||
|
||||
if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
|
||||
return;
|
||||
|
||||
reg = _MMIO(regs[ring_id]);
|
||||
|
||||
I915_WRITE(reg, 0x1);
|
||||
|
||||
if (wait_for_atomic((I915_READ(reg) == 0), 50))
|
||||
gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
|
||||
|
||||
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
|
||||
}
|
||||
|
||||
static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
i915_reg_t offset, l3_offset;
|
||||
u32 regs[] = {
|
||||
[RCS] = 0xc800,
|
||||
[VCS] = 0xc900,
|
||||
[VCS2] = 0xca00,
|
||||
[BCS] = 0xcc00,
|
||||
[VECS] = 0xcb00,
|
||||
};
|
||||
int i;
|
||||
|
||||
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
|
||||
return;
|
||||
|
||||
if (!IS_SKYLAKE(dev_priv))
|
||||
return;
|
||||
|
||||
for (i = 0; i < 64; i++) {
|
||||
gen9_render_mocs[ring_id][i] = I915_READ(offset);
|
||||
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
|
||||
POSTING_READ(offset);
|
||||
offset.reg += 4;
|
||||
}
|
||||
|
||||
if (ring_id == RCS) {
|
||||
l3_offset.reg = 0xb020;
|
||||
for (i = 0; i < 32; i++) {
|
||||
gen9_render_mocs_L3[i] = I915_READ(l3_offset);
|
||||
I915_WRITE(l3_offset, vgpu_vreg(vgpu, offset));
|
||||
POSTING_READ(l3_offset);
|
||||
l3_offset.reg += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
i915_reg_t offset, l3_offset;
|
||||
u32 regs[] = {
|
||||
[RCS] = 0xc800,
|
||||
[VCS] = 0xc900,
|
||||
[VCS2] = 0xca00,
|
||||
[BCS] = 0xcc00,
|
||||
[VECS] = 0xcb00,
|
||||
};
|
||||
int i;
|
||||
|
||||
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
|
||||
return;
|
||||
|
||||
if (!IS_SKYLAKE(dev_priv))
|
||||
return;
|
||||
|
||||
for (i = 0; i < 64; i++) {
|
||||
vgpu_vreg(vgpu, offset) = I915_READ(offset);
|
||||
I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
|
||||
POSTING_READ(offset);
|
||||
offset.reg += 4;
|
||||
}
|
||||
|
||||
if (ring_id == RCS) {
|
||||
l3_offset.reg = 0xb020;
|
||||
for (i = 0; i < 32; i++) {
|
||||
vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
|
||||
I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
|
||||
POSTING_READ(l3_offset);
|
||||
l3_offset.reg += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct render_mmio *mmio;
|
||||
u32 v;
|
||||
int i, array_size;
|
||||
|
||||
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
|
||||
mmio = gen9_render_mmio_list;
|
||||
array_size = ARRAY_SIZE(gen9_render_mmio_list);
|
||||
load_mocs(vgpu, ring_id);
|
||||
} else {
|
||||
mmio = gen8_render_mmio_list;
|
||||
array_size = ARRAY_SIZE(gen8_render_mmio_list);
|
||||
}
|
||||
|
||||
for (i = 0; i < array_size; i++, mmio++) {
|
||||
if (mmio->ring_id != ring_id)
|
||||
continue;
|
||||
|
||||
mmio->value = I915_READ(mmio->reg);
|
||||
if (mmio->mask)
|
||||
v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
|
||||
else
|
||||
v = vgpu_vreg(vgpu, mmio->reg);
|
||||
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
|
||||
gvt_dbg_render("load reg %x old %x new %x\n",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
handle_tlb_pending_event(vgpu, ring_id);
|
||||
}
|
||||
|
||||
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
struct render_mmio *mmio;
|
||||
u32 v;
|
||||
int i, array_size;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv)) {
|
||||
mmio = gen9_render_mmio_list;
|
||||
array_size = ARRAY_SIZE(gen9_render_mmio_list);
|
||||
restore_mocs(vgpu, ring_id);
|
||||
} else {
|
||||
mmio = gen8_render_mmio_list;
|
||||
array_size = ARRAY_SIZE(gen8_render_mmio_list);
|
||||
}
|
||||
|
||||
for (i = 0; i < array_size; i++, mmio++) {
|
||||
if (mmio->ring_id != ring_id)
|
||||
continue;
|
||||
|
||||
vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
|
||||
|
||||
if (mmio->mask) {
|
||||
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
|
||||
v = mmio->value | (mmio->mask << 16);
|
||||
} else
|
||||
v = mmio->value;
|
||||
|
||||
I915_WRITE(mmio->reg, v);
|
||||
POSTING_READ(mmio->reg);
|
||||
|
||||
gvt_dbg_render("restore reg %x old %x new %x\n",
|
||||
i915_mmio_reg_offset(mmio->reg),
|
||||
mmio->value, v);
|
||||
}
|
||||
}
|
||||
43
drivers/gpu/drm/i915/gvt/render.h
Normal file
43
drivers/gpu/drm/i915/gvt/render.h
Normal file
@@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
* Changbin Du <changbin.du@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __GVT_RENDER_H__
|
||||
#define __GVT_RENDER_H__
|
||||
|
||||
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id);
|
||||
|
||||
void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id);
|
||||
|
||||
#endif
|
||||
291
drivers/gpu/drm/i915/gvt/sched_policy.c
Normal file
291
drivers/gpu/drm/i915/gvt/sched_policy.c
Normal file
@@ -0,0 +1,291 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Anhua Xu
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_vgpu_execlist *execlist;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
execlist = &vgpu->execlist[i];
|
||||
if (!list_empty(workload_q_head(vgpu, i)))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
int i;
|
||||
|
||||
/* no target to schedule */
|
||||
if (!scheduler->next_vgpu)
|
||||
return;
|
||||
|
||||
gvt_dbg_sched("try to schedule next vgpu %d\n",
|
||||
scheduler->next_vgpu->id);
|
||||
|
||||
/*
|
||||
* after the flag is set, workload dispatch thread will
|
||||
* stop dispatching workload for current vgpu
|
||||
*/
|
||||
scheduler->need_reschedule = true;
|
||||
|
||||
/* still have uncompleted workload? */
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
if (scheduler->current_workload[i]) {
|
||||
gvt_dbg_sched("still have running workload\n");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
gvt_dbg_sched("switch to next vgpu %d\n",
|
||||
scheduler->next_vgpu->id);
|
||||
|
||||
/* switch current vgpu */
|
||||
scheduler->current_vgpu = scheduler->next_vgpu;
|
||||
scheduler->next_vgpu = NULL;
|
||||
|
||||
scheduler->need_reschedule = false;
|
||||
|
||||
/* wake up workload dispatch thread */
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++)
|
||||
wake_up(&scheduler->waitq[i]);
|
||||
}
|
||||
|
||||
struct tbs_vgpu_data {
|
||||
struct list_head list;
|
||||
struct intel_vgpu *vgpu;
|
||||
/* put some per-vgpu sched stats here */
|
||||
};
|
||||
|
||||
struct tbs_sched_data {
|
||||
struct intel_gvt *gvt;
|
||||
struct delayed_work work;
|
||||
unsigned long period;
|
||||
struct list_head runq_head;
|
||||
};
|
||||
|
||||
#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
|
||||
|
||||
static void tbs_sched_func(struct work_struct *work)
|
||||
{
|
||||
struct tbs_sched_data *sched_data = container_of(work,
|
||||
struct tbs_sched_data, work.work);
|
||||
struct tbs_vgpu_data *vgpu_data;
|
||||
|
||||
struct intel_gvt *gvt = sched_data->gvt;
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
|
||||
struct intel_vgpu *vgpu = NULL;
|
||||
struct list_head *pos, *head;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
/* no vgpu or has already had a target */
|
||||
if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
|
||||
goto out;
|
||||
|
||||
if (scheduler->current_vgpu) {
|
||||
vgpu_data = scheduler->current_vgpu->sched_data;
|
||||
head = &vgpu_data->list;
|
||||
} else {
|
||||
gvt_dbg_sched("no current vgpu search from q head\n");
|
||||
head = &sched_data->runq_head;
|
||||
}
|
||||
|
||||
/* search a vgpu with pending workload */
|
||||
list_for_each(pos, head) {
|
||||
if (pos == &sched_data->runq_head)
|
||||
continue;
|
||||
|
||||
vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
|
||||
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
|
||||
continue;
|
||||
|
||||
vgpu = vgpu_data->vgpu;
|
||||
break;
|
||||
}
|
||||
|
||||
if (vgpu) {
|
||||
scheduler->next_vgpu = vgpu;
|
||||
gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
|
||||
}
|
||||
out:
|
||||
if (scheduler->next_vgpu) {
|
||||
gvt_dbg_sched("try to schedule next vgpu %d\n",
|
||||
scheduler->next_vgpu->id);
|
||||
try_to_schedule_next_vgpu(gvt);
|
||||
}
|
||||
|
||||
/*
|
||||
* still have vgpu on runq
|
||||
* or last schedule haven't finished due to running workload
|
||||
*/
|
||||
if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
|
||||
schedule_delayed_work(&sched_data->work, sched_data->period);
|
||||
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
static int tbs_sched_init(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler =
|
||||
&gvt->scheduler;
|
||||
|
||||
struct tbs_sched_data *data;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&data->runq_head);
|
||||
INIT_DELAYED_WORK(&data->work, tbs_sched_func);
|
||||
data->period = GVT_DEFAULT_TIME_SLICE;
|
||||
data->gvt = gvt;
|
||||
|
||||
scheduler->sched_data = data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tbs_sched_clean(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler =
|
||||
&gvt->scheduler;
|
||||
struct tbs_sched_data *data = scheduler->sched_data;
|
||||
|
||||
cancel_delayed_work(&data->work);
|
||||
kfree(data);
|
||||
scheduler->sched_data = NULL;
|
||||
}
|
||||
|
||||
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct tbs_vgpu_data *data;
|
||||
|
||||
data = kzalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
data->vgpu = vgpu;
|
||||
INIT_LIST_HEAD(&data->list);
|
||||
|
||||
vgpu->sched_data = data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
kfree(vgpu->sched_data);
|
||||
vgpu->sched_data = NULL;
|
||||
}
|
||||
|
||||
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
|
||||
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
|
||||
|
||||
if (!list_empty(&vgpu_data->list))
|
||||
return;
|
||||
|
||||
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
|
||||
schedule_delayed_work(&sched_data->work, sched_data->period);
|
||||
}
|
||||
|
||||
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
|
||||
|
||||
list_del_init(&vgpu_data->list);
|
||||
}
|
||||
|
||||
struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
|
||||
.init = tbs_sched_init,
|
||||
.clean = tbs_sched_clean,
|
||||
.init_vgpu = tbs_sched_init_vgpu,
|
||||
.clean_vgpu = tbs_sched_clean_vgpu,
|
||||
.start_schedule = tbs_sched_start_schedule,
|
||||
.stop_schedule = tbs_sched_stop_schedule,
|
||||
};
|
||||
|
||||
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
|
||||
{
|
||||
gvt->scheduler.sched_ops = &tbs_schedule_ops;
|
||||
|
||||
return gvt->scheduler.sched_ops->init(gvt);
|
||||
}
|
||||
|
||||
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
|
||||
{
|
||||
gvt->scheduler.sched_ops->clean(gvt);
|
||||
}
|
||||
|
||||
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
|
||||
{
|
||||
return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
|
||||
}
|
||||
|
||||
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
|
||||
{
|
||||
vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
|
||||
}
|
||||
|
||||
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
|
||||
|
||||
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
|
||||
}
|
||||
|
||||
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler =
|
||||
&vgpu->gvt->scheduler;
|
||||
|
||||
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
|
||||
|
||||
scheduler->sched_ops->stop_schedule(vgpu);
|
||||
|
||||
if (scheduler->next_vgpu == vgpu)
|
||||
scheduler->next_vgpu = NULL;
|
||||
|
||||
if (scheduler->current_vgpu == vgpu) {
|
||||
/* stop workload dispatching */
|
||||
scheduler->need_reschedule = true;
|
||||
scheduler->current_vgpu = NULL;
|
||||
}
|
||||
}
|
||||
58
drivers/gpu/drm/i915/gvt/sched_policy.h
Normal file
58
drivers/gpu/drm/i915/gvt/sched_policy.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Anhua Xu
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __GVT_SCHED_POLICY__
|
||||
#define __GVT_SCHED_POLICY__
|
||||
|
||||
struct intel_gvt_sched_policy_ops {
|
||||
int (*init)(struct intel_gvt *gvt);
|
||||
void (*clean)(struct intel_gvt *gvt);
|
||||
int (*init_vgpu)(struct intel_vgpu *vgpu);
|
||||
void (*clean_vgpu)(struct intel_vgpu *vgpu);
|
||||
void (*start_schedule)(struct intel_vgpu *vgpu);
|
||||
void (*stop_schedule)(struct intel_vgpu *vgpu);
|
||||
};
|
||||
|
||||
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
|
||||
|
||||
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
|
||||
|
||||
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu);
|
||||
|
||||
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu);
|
||||
|
||||
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
|
||||
|
||||
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
|
||||
|
||||
#endif
|
||||
572
drivers/gpu/drm/i915/gvt/scheduler.c
Normal file
572
drivers/gpu/drm/i915/gvt/scheduler.c
Normal file
@@ -0,0 +1,572 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Chanbin Du <changbin.du@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#define RING_CTX_OFF(x) \
|
||||
offsetof(struct execlist_ring_context, x)
|
||||
|
||||
void set_context_pdp_root_pointer(struct execlist_ring_context *ring_context,
|
||||
u32 pdp[8])
|
||||
{
|
||||
struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
pdp_pair[i].val = pdp[7 - i];
|
||||
}
|
||||
|
||||
static int populate_shadow_context(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
int ring_id = workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_gem_object *ctx_obj =
|
||||
shadow_ctx->engine[ring_id].state->obj;
|
||||
struct execlist_ring_context *shadow_ring_context;
|
||||
struct page *page;
|
||||
void *dst;
|
||||
unsigned long context_gpa, context_page_num;
|
||||
int i;
|
||||
|
||||
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
|
||||
workload->ctx_desc.lrca);
|
||||
|
||||
context_page_num = intel_lr_context_size(
|
||||
&gvt->dev_priv->engine[ring_id]);
|
||||
|
||||
context_page_num = context_page_num >> PAGE_SHIFT;
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
|
||||
context_page_num = 19;
|
||||
|
||||
i = 2;
|
||||
|
||||
while (i < context_page_num) {
|
||||
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
|
||||
(u32)((workload->ctx_desc.lrca + i) <<
|
||||
GTT_PAGE_SHIFT));
|
||||
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("Invalid guest context descriptor\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
|
||||
dst = kmap_atomic(page);
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
|
||||
GTT_PAGE_SIZE);
|
||||
kunmap_atomic(dst);
|
||||
i++;
|
||||
}
|
||||
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
|
||||
shadow_ring_context = kmap_atomic(page);
|
||||
|
||||
#define COPY_REG(name) \
|
||||
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
|
||||
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
|
||||
|
||||
COPY_REG(ctx_ctrl);
|
||||
COPY_REG(ctx_timestamp);
|
||||
|
||||
if (ring_id == RCS) {
|
||||
COPY_REG(bb_per_ctx_ptr);
|
||||
COPY_REG(rcs_indirect_ctx);
|
||||
COPY_REG(rcs_indirect_ctx_offset);
|
||||
}
|
||||
#undef COPY_REG
|
||||
|
||||
set_context_pdp_root_pointer(shadow_ring_context,
|
||||
workload->shadow_mm->shadow_page_table);
|
||||
|
||||
intel_gvt_hypervisor_read_gpa(vgpu,
|
||||
workload->ring_context_gpa +
|
||||
sizeof(*shadow_ring_context),
|
||||
(void *)shadow_ring_context +
|
||||
sizeof(*shadow_ring_context),
|
||||
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
|
||||
|
||||
kunmap_atomic(shadow_ring_context);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int shadow_context_status_change(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
struct intel_vgpu *vgpu = container_of(nb,
|
||||
struct intel_vgpu, shadow_ctx_notifier_block);
|
||||
struct drm_i915_gem_request *req =
|
||||
(struct drm_i915_gem_request *)data;
|
||||
struct intel_gvt_workload_scheduler *scheduler =
|
||||
&vgpu->gvt->scheduler;
|
||||
struct intel_vgpu_workload *workload =
|
||||
scheduler->current_workload[req->engine->id];
|
||||
|
||||
switch (action) {
|
||||
case INTEL_CONTEXT_SCHEDULE_IN:
|
||||
intel_gvt_load_render_mmio(workload->vgpu,
|
||||
workload->ring_id);
|
||||
atomic_set(&workload->shadow_ctx_active, 1);
|
||||
break;
|
||||
case INTEL_CONTEXT_SCHEDULE_OUT:
|
||||
intel_gvt_restore_render_mmio(workload->vgpu,
|
||||
workload->ring_id);
|
||||
atomic_set(&workload->shadow_ctx_active, 0);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
wake_up(&workload->shadow_ctx_status_wq);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
int ring_id = workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
||||
int ret;
|
||||
|
||||
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
|
||||
ring_id, workload);
|
||||
|
||||
shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
|
||||
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||
|
||||
workload->req = i915_gem_request_alloc(&dev_priv->engine[ring_id],
|
||||
shadow_ctx);
|
||||
if (IS_ERR_OR_NULL(workload->req)) {
|
||||
gvt_err("fail to allocate gem request\n");
|
||||
workload->status = PTR_ERR(workload->req);
|
||||
workload->req = NULL;
|
||||
return workload->status;
|
||||
}
|
||||
|
||||
gvt_dbg_sched("ring id %d get i915 gem request %p\n",
|
||||
ring_id, workload->req);
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
ret = intel_gvt_scan_and_shadow_workload(workload);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = populate_shadow_context(workload);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (workload->prepare) {
|
||||
ret = workload->prepare(workload);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
mutex_unlock(&gvt->lock);
|
||||
|
||||
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
|
||||
ring_id, workload->req);
|
||||
|
||||
i915_add_request_no_flush(workload->req);
|
||||
|
||||
workload->dispatched = true;
|
||||
return 0;
|
||||
err:
|
||||
workload->status = ret;
|
||||
if (workload->req)
|
||||
workload->req = NULL;
|
||||
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct intel_vgpu_workload *pick_next_workload(
|
||||
struct intel_gvt *gvt, int ring_id)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
struct intel_vgpu_workload *workload = NULL;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
/*
|
||||
* no current vgpu / will be scheduled out / no workload
|
||||
* bail out
|
||||
*/
|
||||
if (!scheduler->current_vgpu) {
|
||||
gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (scheduler->need_reschedule) {
|
||||
gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
|
||||
gvt_dbg_sched("ring id %d stop - no available workload\n",
|
||||
ring_id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* still have current workload, maybe the workload disptacher
|
||||
* fail to submit it for some reason, resubmit it.
|
||||
*/
|
||||
if (scheduler->current_workload[ring_id]) {
|
||||
workload = scheduler->current_workload[ring_id];
|
||||
gvt_dbg_sched("ring id %d still have current workload %p\n",
|
||||
ring_id, workload);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* pick a workload as current workload
|
||||
* once current workload is set, schedule policy routines
|
||||
* will wait the current workload is finished when trying to
|
||||
* schedule out a vgpu.
|
||||
*/
|
||||
scheduler->current_workload[ring_id] = container_of(
|
||||
workload_q_head(scheduler->current_vgpu, ring_id)->next,
|
||||
struct intel_vgpu_workload, list);
|
||||
|
||||
workload = scheduler->current_workload[ring_id];
|
||||
|
||||
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
|
||||
|
||||
atomic_inc(&workload->vgpu->running_workload_num);
|
||||
out:
|
||||
mutex_unlock(&gvt->lock);
|
||||
return workload;
|
||||
}
|
||||
|
||||
static void update_guest_context(struct intel_vgpu_workload *workload)
|
||||
{
|
||||
struct intel_vgpu *vgpu = workload->vgpu;
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
int ring_id = workload->ring_id;
|
||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||
struct drm_i915_gem_object *ctx_obj =
|
||||
shadow_ctx->engine[ring_id].state->obj;
|
||||
struct execlist_ring_context *shadow_ring_context;
|
||||
struct page *page;
|
||||
void *src;
|
||||
unsigned long context_gpa, context_page_num;
|
||||
int i;
|
||||
|
||||
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
|
||||
workload->ctx_desc.lrca);
|
||||
|
||||
context_page_num = intel_lr_context_size(
|
||||
&gvt->dev_priv->engine[ring_id]);
|
||||
|
||||
context_page_num = context_page_num >> PAGE_SHIFT;
|
||||
|
||||
if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
|
||||
context_page_num = 19;
|
||||
|
||||
i = 2;
|
||||
|
||||
while (i < context_page_num) {
|
||||
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
|
||||
(u32)((workload->ctx_desc.lrca + i) <<
|
||||
GTT_PAGE_SHIFT));
|
||||
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
|
||||
gvt_err("invalid guest context descriptor\n");
|
||||
return;
|
||||
}
|
||||
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
|
||||
src = kmap_atomic(page);
|
||||
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
|
||||
GTT_PAGE_SIZE);
|
||||
kunmap_atomic(src);
|
||||
i++;
|
||||
}
|
||||
|
||||
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
|
||||
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
|
||||
|
||||
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
|
||||
shadow_ring_context = kmap_atomic(page);
|
||||
|
||||
#define COPY_REG(name) \
|
||||
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
|
||||
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
|
||||
|
||||
COPY_REG(ctx_ctrl);
|
||||
COPY_REG(ctx_timestamp);
|
||||
|
||||
#undef COPY_REG
|
||||
|
||||
intel_gvt_hypervisor_write_gpa(vgpu,
|
||||
workload->ring_context_gpa +
|
||||
sizeof(*shadow_ring_context),
|
||||
(void *)shadow_ring_context +
|
||||
sizeof(*shadow_ring_context),
|
||||
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
|
||||
|
||||
kunmap_atomic(shadow_ring_context);
|
||||
}
|
||||
|
||||
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
struct intel_vgpu_workload *workload;
|
||||
int event;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
workload = scheduler->current_workload[ring_id];
|
||||
|
||||
if (!workload->status && !workload->vgpu->resetting) {
|
||||
wait_event(workload->shadow_ctx_status_wq,
|
||||
!atomic_read(&workload->shadow_ctx_active));
|
||||
|
||||
update_guest_context(workload);
|
||||
|
||||
for_each_set_bit(event, workload->pending_events,
|
||||
INTEL_GVT_EVENT_MAX)
|
||||
intel_vgpu_trigger_virtual_event(workload->vgpu,
|
||||
event);
|
||||
}
|
||||
|
||||
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
|
||||
ring_id, workload, workload->status);
|
||||
|
||||
scheduler->current_workload[ring_id] = NULL;
|
||||
|
||||
atomic_dec(&workload->vgpu->running_workload_num);
|
||||
|
||||
list_del_init(&workload->list);
|
||||
workload->complete(workload);
|
||||
|
||||
wake_up(&scheduler->workload_complete_wq);
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
struct workload_thread_param {
|
||||
struct intel_gvt *gvt;
|
||||
int ring_id;
|
||||
};
|
||||
|
||||
static int workload_thread(void *priv)
|
||||
{
|
||||
struct workload_thread_param *p = (struct workload_thread_param *)priv;
|
||||
struct intel_gvt *gvt = p->gvt;
|
||||
int ring_id = p->ring_id;
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
struct intel_vgpu_workload *workload = NULL;
|
||||
int ret;
|
||||
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
|
||||
|
||||
kfree(p);
|
||||
|
||||
gvt_dbg_core("workload thread for ring %d started\n", ring_id);
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
ret = wait_event_interruptible(scheduler->waitq[ring_id],
|
||||
kthread_should_stop() ||
|
||||
(workload = pick_next_workload(gvt, ring_id)));
|
||||
|
||||
WARN_ON_ONCE(ret);
|
||||
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
|
||||
workload->ring_id, workload,
|
||||
workload->vgpu->id);
|
||||
|
||||
intel_runtime_pm_get(gvt->dev_priv);
|
||||
|
||||
/*
|
||||
* Always take i915 big lock first
|
||||
*/
|
||||
ret = i915_mutex_lock_interruptible(&gvt->dev_priv->drm);
|
||||
if (ret < 0) {
|
||||
gvt_err("i915 submission is not available, retry\n");
|
||||
schedule_timeout(1);
|
||||
continue;
|
||||
}
|
||||
|
||||
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
|
||||
workload->ring_id, workload);
|
||||
|
||||
if (need_force_wake)
|
||||
intel_uncore_forcewake_get(gvt->dev_priv,
|
||||
FORCEWAKE_ALL);
|
||||
|
||||
ret = dispatch_workload(workload);
|
||||
if (ret) {
|
||||
gvt_err("fail to dispatch workload, skip\n");
|
||||
goto complete;
|
||||
}
|
||||
|
||||
gvt_dbg_sched("ring id %d wait workload %p\n",
|
||||
workload->ring_id, workload);
|
||||
|
||||
workload->status = i915_wait_request(workload->req,
|
||||
I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
|
||||
NULL, NULL);
|
||||
if (workload->status != 0)
|
||||
gvt_err("fail to wait workload, skip\n");
|
||||
|
||||
complete:
|
||||
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
|
||||
workload, workload->status);
|
||||
|
||||
complete_current_workload(gvt, ring_id);
|
||||
|
||||
if (need_force_wake)
|
||||
intel_uncore_forcewake_put(gvt->dev_priv,
|
||||
FORCEWAKE_ALL);
|
||||
|
||||
mutex_unlock(&gvt->dev_priv->drm.struct_mutex);
|
||||
|
||||
intel_runtime_pm_put(gvt->dev_priv);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
|
||||
if (atomic_read(&vgpu->running_workload_num)) {
|
||||
gvt_dbg_sched("wait vgpu idle\n");
|
||||
|
||||
wait_event(scheduler->workload_complete_wq,
|
||||
!atomic_read(&vgpu->running_workload_num));
|
||||
}
|
||||
}
|
||||
|
||||
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
int i;
|
||||
|
||||
gvt_dbg_core("clean workload scheduler\n");
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
if (scheduler->thread[i]) {
|
||||
kthread_stop(scheduler->thread[i]);
|
||||
scheduler->thread[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
|
||||
{
|
||||
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
|
||||
struct workload_thread_param *param = NULL;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
gvt_dbg_core("init workload scheduler\n");
|
||||
|
||||
init_waitqueue_head(&scheduler->workload_complete_wq);
|
||||
|
||||
for (i = 0; i < I915_NUM_ENGINES; i++) {
|
||||
init_waitqueue_head(&scheduler->waitq[i]);
|
||||
|
||||
param = kzalloc(sizeof(*param), GFP_KERNEL);
|
||||
if (!param) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
param->gvt = gvt;
|
||||
param->ring_id = i;
|
||||
|
||||
scheduler->thread[i] = kthread_run(workload_thread, param,
|
||||
"gvt workload %d", i);
|
||||
if (IS_ERR(scheduler->thread[i])) {
|
||||
gvt_err("fail to create workload thread\n");
|
||||
ret = PTR_ERR(scheduler->thread[i]);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
intel_gvt_clean_workload_scheduler(gvt);
|
||||
kfree(param);
|
||||
param = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||
|
||||
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
|
||||
&vgpu->shadow_ctx_notifier_block);
|
||||
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
/* a little hacky to mark as ctx closed */
|
||||
vgpu->shadow_ctx->closed = true;
|
||||
i915_gem_context_put(vgpu->shadow_ctx);
|
||||
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
|
||||
{
|
||||
atomic_set(&vgpu->running_workload_num, 0);
|
||||
|
||||
vgpu->shadow_ctx = i915_gem_context_create_gvt(
|
||||
&vgpu->gvt->dev_priv->drm);
|
||||
if (IS_ERR(vgpu->shadow_ctx))
|
||||
return PTR_ERR(vgpu->shadow_ctx);
|
||||
|
||||
vgpu->shadow_ctx->engine[RCS].initialised = true;
|
||||
|
||||
vgpu->shadow_ctx_notifier_block.notifier_call =
|
||||
shadow_context_status_change;
|
||||
|
||||
atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
|
||||
&vgpu->shadow_ctx_notifier_block);
|
||||
return 0;
|
||||
}
|
||||
139
drivers/gpu/drm/i915/gvt/scheduler.h
Normal file
139
drivers/gpu/drm/i915/gvt/scheduler.h
Normal file
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Tina Zhang <tina.zhang@intel.com>
|
||||
* Chanbin Du <changbin.du@intel.com>
|
||||
* Min He <min.he@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
* Zhenyu Wang <zhenyuw@linux.intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _GVT_SCHEDULER_H_
|
||||
#define _GVT_SCHEDULER_H_
|
||||
|
||||
struct intel_gvt_workload_scheduler {
|
||||
struct intel_vgpu *current_vgpu;
|
||||
struct intel_vgpu *next_vgpu;
|
||||
struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
|
||||
bool need_reschedule;
|
||||
|
||||
wait_queue_head_t workload_complete_wq;
|
||||
struct task_struct *thread[I915_NUM_ENGINES];
|
||||
wait_queue_head_t waitq[I915_NUM_ENGINES];
|
||||
|
||||
void *sched_data;
|
||||
struct intel_gvt_sched_policy_ops *sched_ops;
|
||||
};
|
||||
|
||||
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
|
||||
#define INDIRECT_CTX_SIZE_MASK 0x3f
|
||||
struct shadow_indirect_ctx {
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned long guest_gma;
|
||||
unsigned long shadow_gma;
|
||||
void *shadow_va;
|
||||
uint32_t size;
|
||||
};
|
||||
|
||||
#define PER_CTX_ADDR_MASK 0xfffff000
|
||||
struct shadow_per_ctx {
|
||||
unsigned long guest_gma;
|
||||
unsigned long shadow_gma;
|
||||
};
|
||||
|
||||
struct intel_shadow_wa_ctx {
|
||||
struct intel_vgpu_workload *workload;
|
||||
struct shadow_indirect_ctx indirect_ctx;
|
||||
struct shadow_per_ctx per_ctx;
|
||||
|
||||
};
|
||||
|
||||
struct intel_vgpu_workload {
|
||||
struct intel_vgpu *vgpu;
|
||||
int ring_id;
|
||||
struct drm_i915_gem_request *req;
|
||||
/* if this workload has been dispatched to i915? */
|
||||
bool dispatched;
|
||||
int status;
|
||||
|
||||
struct intel_vgpu_mm *shadow_mm;
|
||||
|
||||
/* different submission model may need different handler */
|
||||
int (*prepare)(struct intel_vgpu_workload *);
|
||||
int (*complete)(struct intel_vgpu_workload *);
|
||||
struct list_head list;
|
||||
|
||||
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
|
||||
void *shadow_ring_buffer_va;
|
||||
|
||||
/* execlist context information */
|
||||
struct execlist_ctx_descriptor_format ctx_desc;
|
||||
struct execlist_ring_context *ring_context;
|
||||
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
|
||||
bool restore_inhibit;
|
||||
struct intel_vgpu_elsp_dwords elsp_dwords;
|
||||
bool emulate_schedule_in;
|
||||
atomic_t shadow_ctx_active;
|
||||
wait_queue_head_t shadow_ctx_status_wq;
|
||||
u64 ring_context_gpa;
|
||||
|
||||
/* shadow batch buffer */
|
||||
struct list_head shadow_bb;
|
||||
struct intel_shadow_wa_ctx wa_ctx;
|
||||
};
|
||||
|
||||
/* Intel shadow batch buffer is a i915 gem object */
|
||||
struct intel_shadow_bb_entry {
|
||||
struct list_head list;
|
||||
struct drm_i915_gem_object *obj;
|
||||
void *va;
|
||||
unsigned long len;
|
||||
void *bb_start_cmd_va;
|
||||
};
|
||||
|
||||
/* Head of the per-ring workload queue for a vGPU.
 * Arguments are fully parenthesized so the macros expand safely for any
 * expression argument (the original relied on bare expansion).
 */
#define workload_q_head(vgpu, ring_id) \
	(&((vgpu)->workload_q_head[(ring_id)]))

/* Enqueue a workload and wake the matching dispatch thread. */
#define queue_workload(workload) do { \
	list_add_tail(&(workload)->list, \
		workload_q_head((workload)->vgpu, (workload)->ring_id)); \
	wake_up(&(workload)->vgpu->gvt-> \
		scheduler.waitq[(workload)->ring_id]); \
} while (0)
|
||||
|
||||
/* Start/stop the per-engine dispatch threads (device scope). */
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

/* Block until the vGPU has no in-flight workloads. */
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

/* Create/destroy the vGPU's shadow i915 context (vGPU scope). */
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);

#endif
|
||||
286
drivers/gpu/drm/i915/gvt/trace.h
Normal file
286
drivers/gpu/drm/i915/gvt/trace.h
Normal file
@@ -0,0 +1,286 @@
|
||||
/*
|
||||
* Copyright © 2011-2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#if !defined(_GVT_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _GVT_TRACE_H_
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/tracepoint.h>
|
||||
#include <asm/tsc.h>
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM gvt
|
||||
|
||||
TRACE_EVENT(spt_alloc,
|
||||
TP_PROTO(int id, void *spt, int type, unsigned long mfn,
|
||||
unsigned long gpt_gfn),
|
||||
|
||||
TP_ARGS(id, spt, type, mfn, gpt_gfn),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, id)
|
||||
__field(void *, spt)
|
||||
__field(int, type)
|
||||
__field(unsigned long, mfn)
|
||||
__field(unsigned long, gpt_gfn)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->id = id;
|
||||
__entry->spt = spt;
|
||||
__entry->type = type;
|
||||
__entry->mfn = mfn;
|
||||
__entry->gpt_gfn = gpt_gfn;
|
||||
),
|
||||
|
||||
TP_printk("VM%d [alloc] spt %p type %d mfn 0x%lx gfn 0x%lx\n",
|
||||
__entry->id,
|
||||
__entry->spt,
|
||||
__entry->type,
|
||||
__entry->mfn,
|
||||
__entry->gpt_gfn)
|
||||
);
|
||||
|
||||
TRACE_EVENT(spt_free,
|
||||
TP_PROTO(int id, void *spt, int type),
|
||||
|
||||
TP_ARGS(id, spt, type),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, id)
|
||||
__field(void *, spt)
|
||||
__field(int, type)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->id = id;
|
||||
__entry->spt = spt;
|
||||
__entry->type = type;
|
||||
),
|
||||
|
||||
TP_printk("VM%u [free] spt %p type %d\n",
|
||||
__entry->id,
|
||||
__entry->spt,
|
||||
__entry->type)
|
||||
);
|
||||
|
||||
#define MAX_BUF_LEN 256
|
||||
|
||||
TRACE_EVENT(gma_index,
|
||||
TP_PROTO(const char *prefix, unsigned long gma,
|
||||
unsigned long index),
|
||||
|
||||
TP_ARGS(prefix, gma, index),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"%s gma 0x%lx index 0x%lx\n", prefix, gma, index);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(gma_translate,
|
||||
TP_PROTO(int id, char *type, int ring_id, int pt_level,
|
||||
unsigned long gma, unsigned long gpa),
|
||||
|
||||
TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
|
||||
id, type, ring_id, pt_level, gma, gpa);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(spt_refcount,
|
||||
TP_PROTO(int id, char *action, void *spt, int before, int after),
|
||||
|
||||
TP_ARGS(id, action, spt, before, after),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d [%s] spt %p before %d -> after %d\n",
|
||||
id, action, spt, before, after);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(spt_change,
|
||||
TP_PROTO(int id, char *action, void *spt, unsigned long gfn,
|
||||
int type),
|
||||
|
||||
TP_ARGS(id, action, spt, gfn, type),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d [%s] spt %p gfn 0x%lx type %d\n",
|
||||
id, action, spt, gfn, type);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(gpt_change,
|
||||
TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
|
||||
unsigned long index),
|
||||
|
||||
TP_ARGS(id, tag, spt, type, v, index),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d [%s] spt %p type %d entry 0x%llx index 0x%lx\n",
|
||||
id, tag, spt, type, v, index);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(oos_change,
|
||||
TP_PROTO(int id, const char *tag, int page_id, void *gpt, int type),
|
||||
|
||||
TP_ARGS(id, tag, page_id, gpt, type),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d [oos %s] page id %d gpt %p type %d\n",
|
||||
id, tag, page_id, gpt, type);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
TRACE_EVENT(oos_sync,
|
||||
TP_PROTO(int id, int page_id, void *gpt, int type, u64 v,
|
||||
unsigned long index),
|
||||
|
||||
TP_ARGS(id, page_id, gpt, type, v, index),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__array(char, buf, MAX_BUF_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
snprintf(__entry->buf, MAX_BUF_LEN,
|
||||
"VM%d [oos sync] page id %d gpt %p type %d entry 0x%llx index 0x%lx\n",
|
||||
id, page_id, gpt, type, v, index);
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->buf)
|
||||
);
|
||||
|
||||
#define MAX_CMD_STR_LEN 256
|
||||
TRACE_EVENT(gvt_command,
|
||||
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
|
||||
|
||||
TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u8, vm_id)
|
||||
__field(u8, ring_id)
|
||||
__field(int, i)
|
||||
__array(char, tmp_buf, MAX_CMD_STR_LEN)
|
||||
__array(char, cmd_str, MAX_CMD_STR_LEN)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vm_id = vm_id;
|
||||
__entry->ring_id = ring_id;
|
||||
__entry->cmd_str[0] = '\0';
|
||||
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
|
||||
strcat(__entry->cmd_str, __entry->tmp_buf);
|
||||
entry->i = 0;
|
||||
while (cmd_len > 0) {
|
||||
if (cmd_len >= 8) {
|
||||
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
|
||||
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
|
||||
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
|
||||
__entry->i += 8;
|
||||
cmd_len -= 8;
|
||||
strcat(__entry->cmd_str, __entry->tmp_buf);
|
||||
} else if (cmd_len >= 4) {
|
||||
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
|
||||
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
|
||||
__entry->i += 4;
|
||||
cmd_len -= 4;
|
||||
strcat(__entry->cmd_str, __entry->tmp_buf);
|
||||
} else if (cmd_len >= 2) {
|
||||
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
|
||||
__entry->i += 2;
|
||||
cmd_len -= 2;
|
||||
strcat(__entry->cmd_str, __entry->tmp_buf);
|
||||
} else if (cmd_len == 1) {
|
||||
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
|
||||
__entry->i += 1;
|
||||
cmd_len -= 1;
|
||||
strcat(__entry->cmd_str, __entry->tmp_buf);
|
||||
}
|
||||
}
|
||||
strcat(__entry->cmd_str, "\n");
|
||||
),
|
||||
|
||||
TP_printk("%s", __entry->cmd_str)
|
||||
);
|
||||
#endif /* _GVT_TRACE_H_ */
|
||||
|
||||
/* This part must be out of protection */
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
#include <trace/define_trace.h>
|
||||
36
drivers/gpu/drm/i915/gvt/trace_points.c
Normal file
36
drivers/gpu/drm/i915/gvt/trace_points.c
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Jike Song <jike.song@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "trace.h"

/* Standard tracepoint idiom: the second include of trace.h, with
 * CREATE_TRACE_POINTS defined, instantiates the tracepoint bodies.
 * Skipped under sparse (__CHECKER__), which chokes on the expansion.
 */
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
|
||||
272
drivers/gpu/drm/i915/gvt/vgpu.c
Normal file
272
drivers/gpu/drm/i915/gvt/vgpu.c
Normal file
@@ -0,0 +1,272 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Eddie Dong <eddie.dong@intel.com>
|
||||
* Kevin Tian <kevin.tian@intel.com>
|
||||
*
|
||||
* Contributors:
|
||||
* Ping Gao <ping.a.gao@intel.com>
|
||||
* Zhi Wang <zhi.a.wang@intel.com>
|
||||
* Bing Niu <bing.niu@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
|
||||
{
|
||||
vfree(vgpu->mmio.vreg);
|
||||
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
|
||||
}
|
||||
|
||||
static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
const struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
|
||||
vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
|
||||
if (!vgpu->mmio.vreg)
|
||||
return -ENOMEM;
|
||||
|
||||
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
|
||||
|
||||
memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
|
||||
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
|
||||
|
||||
vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
|
||||
|
||||
/* set the bit 0:2(Core C-State ) to C0 */
|
||||
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
|
||||
struct intel_vgpu_creation_params *param)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
const struct intel_gvt_device_info *info = &gvt->device_info;
|
||||
u16 *gmch_ctl;
|
||||
int i;
|
||||
|
||||
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
|
||||
info->cfg_space_size);
|
||||
|
||||
if (!param->primary) {
|
||||
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
|
||||
INTEL_GVT_PCI_CLASS_VGA_OTHER;
|
||||
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
|
||||
INTEL_GVT_PCI_CLASS_VGA_OTHER;
|
||||
}
|
||||
|
||||
/* Show guest that there isn't any stolen memory.*/
|
||||
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
|
||||
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
|
||||
|
||||
intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
|
||||
gvt_aperture_pa_base(gvt), true);
|
||||
|
||||
vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
|
||||
| PCI_COMMAND_MEMORY
|
||||
| PCI_COMMAND_MASTER);
|
||||
/*
|
||||
* Clear the bar upper 32bit and let guest to assign the new value
|
||||
*/
|
||||
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
|
||||
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
|
||||
|
||||
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
|
||||
vgpu->cfg_space.bar[i].size = pci_resource_len(
|
||||
gvt->dev_priv->drm.pdev, i * 2);
|
||||
vgpu->cfg_space.bar[i].tracked = false;
|
||||
}
|
||||
}
|
||||
|
||||
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
|
||||
{
|
||||
/* setup the ballooning information */
|
||||
vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
|
||||
vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
|
||||
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
|
||||
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
|
||||
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
|
||||
vgpu_aperture_gmadr_base(vgpu);
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
|
||||
vgpu_aperture_sz(vgpu);
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
|
||||
vgpu_hidden_gmadr_base(vgpu);
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
|
||||
vgpu_hidden_sz(vgpu);
|
||||
|
||||
vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
|
||||
|
||||
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
|
||||
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
|
||||
vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
|
||||
gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
|
||||
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
|
||||
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
|
||||
|
||||
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_destroy_vgpu - destroy a virtual GPU
|
||||
* @vgpu: virtual GPU
|
||||
*
|
||||
* This function is called when user wants to destroy a virtual GPU.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
|
||||
{
|
||||
struct intel_gvt *gvt = vgpu->gvt;
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
vgpu->active = false;
|
||||
idr_remove(&gvt->vgpu_idr, vgpu->id);
|
||||
|
||||
if (atomic_read(&vgpu->running_workload_num)) {
|
||||
mutex_unlock(&gvt->lock);
|
||||
intel_gvt_wait_vgpu_idle(vgpu);
|
||||
mutex_lock(&gvt->lock);
|
||||
}
|
||||
|
||||
intel_vgpu_stop_schedule(vgpu);
|
||||
intel_vgpu_clean_sched_policy(vgpu);
|
||||
intel_vgpu_clean_gvt_context(vgpu);
|
||||
intel_vgpu_clean_execlist(vgpu);
|
||||
intel_vgpu_clean_display(vgpu);
|
||||
intel_vgpu_clean_opregion(vgpu);
|
||||
intel_vgpu_clean_gtt(vgpu);
|
||||
intel_gvt_hypervisor_detach_vgpu(vgpu);
|
||||
intel_vgpu_free_resource(vgpu);
|
||||
clean_vgpu_mmio(vgpu);
|
||||
vfree(vgpu);
|
||||
|
||||
mutex_unlock(&gvt->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_create_vgpu - create a virtual GPU
|
||||
* @gvt: GVT device
|
||||
* @param: vGPU creation parameters
|
||||
*
|
||||
* This function is called when user wants to create a virtual GPU.
|
||||
*
|
||||
* Returns:
|
||||
* pointer to intel_vgpu, error pointer if failed.
|
||||
*/
|
||||
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
|
||||
struct intel_vgpu_creation_params *param)
|
||||
{
|
||||
struct intel_vgpu *vgpu;
|
||||
int ret;
|
||||
|
||||
gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
|
||||
param->handle, param->low_gm_sz, param->high_gm_sz,
|
||||
param->fence_sz);
|
||||
|
||||
vgpu = vzalloc(sizeof(*vgpu));
|
||||
if (!vgpu)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
mutex_lock(&gvt->lock);
|
||||
|
||||
ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto out_free_vgpu;
|
||||
|
||||
vgpu->id = ret;
|
||||
vgpu->handle = param->handle;
|
||||
vgpu->gvt = gvt;
|
||||
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
|
||||
|
||||
setup_vgpu_cfg_space(vgpu, param);
|
||||
|
||||
ret = setup_vgpu_mmio(vgpu);
|
||||
if (ret)
|
||||
goto out_free_vgpu;
|
||||
|
||||
ret = intel_vgpu_alloc_resource(vgpu, param);
|
||||
if (ret)
|
||||
goto out_clean_vgpu_mmio;
|
||||
|
||||
populate_pvinfo_page(vgpu);
|
||||
|
||||
ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
|
||||
if (ret)
|
||||
goto out_clean_vgpu_resource;
|
||||
|
||||
ret = intel_vgpu_init_gtt(vgpu);
|
||||
if (ret)
|
||||
goto out_detach_hypervisor_vgpu;
|
||||
|
||||
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
|
||||
ret = intel_vgpu_init_opregion(vgpu, 0);
|
||||
if (ret)
|
||||
goto out_clean_gtt;
|
||||
}
|
||||
|
||||
ret = intel_vgpu_init_display(vgpu);
|
||||
if (ret)
|
||||
goto out_clean_opregion;
|
||||
|
||||
ret = intel_vgpu_init_execlist(vgpu);
|
||||
if (ret)
|
||||
goto out_clean_display;
|
||||
|
||||
ret = intel_vgpu_init_gvt_context(vgpu);
|
||||
if (ret)
|
||||
goto out_clean_execlist;
|
||||
|
||||
ret = intel_vgpu_init_sched_policy(vgpu);
|
||||
if (ret)
|
||||
goto out_clean_shadow_ctx;
|
||||
|
||||
vgpu->active = true;
|
||||
mutex_unlock(&gvt->lock);
|
||||
|
||||
return vgpu;
|
||||
|
||||
out_clean_shadow_ctx:
|
||||
intel_vgpu_clean_gvt_context(vgpu);
|
||||
out_clean_execlist:
|
||||
intel_vgpu_clean_execlist(vgpu);
|
||||
out_clean_display:
|
||||
intel_vgpu_clean_display(vgpu);
|
||||
out_clean_opregion:
|
||||
intel_vgpu_clean_opregion(vgpu);
|
||||
out_clean_gtt:
|
||||
intel_vgpu_clean_gtt(vgpu);
|
||||
out_detach_hypervisor_vgpu:
|
||||
intel_gvt_hypervisor_detach_vgpu(vgpu);
|
||||
out_clean_vgpu_resource:
|
||||
intel_vgpu_free_resource(vgpu);
|
||||
out_clean_vgpu_mmio:
|
||||
clean_vgpu_mmio(vgpu);
|
||||
out_free_vgpu:
|
||||
vfree(vgpu);
|
||||
mutex_unlock(&gvt->lock);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
@@ -39,6 +39,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (IS_BROADWELL(dev_priv))
|
||||
return true;
|
||||
if (IS_SKYLAKE(dev_priv))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#ifndef _INTEL_GVT_H_
|
||||
#define _INTEL_GVT_H_
|
||||
|
||||
#include "i915_pvinfo.h"
|
||||
#include "gvt/gvt.h"
|
||||
|
||||
#ifdef CONFIG_DRM_I915_GVT
|
||||
|
||||
Reference in New Issue
Block a user