This patch enables Intel Precise Touch & Stylus (IPTS) support.

This commit is contained in:
Jake Day 2017-08-10 13:46:52 -04:00
parent 3673909c5b
commit c6fc0d7601
14 changed files with 841 additions and 12 deletions

View file

@ -116,6 +116,9 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o
# intel precise touch & stylus
i915-y += intel_ipts.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \

View file

@ -51,6 +51,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
#include "intel_ipts.h"
static struct drm_driver driver;
@ -626,6 +627,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
pr_info(">> let init ipts\n");
if (INTEL_GEN(dev) >= 9 && i915.enable_guc_submission)
intel_ipts_init(dev);
return 0;
cleanup_gem:
@ -1316,6 +1321,9 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
if (INTEL_GEN(dev) >= 9 && i915.enable_guc_submission)
intel_ipts_cleanup(dev);
intel_fbdev_fini(dev);
if (i915_gem_suspend(dev_priv))

View file

@ -3500,6 +3500,9 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
struct i915_gem_context *
i915_gem_context_create_ipts(struct drm_i915_private *dev_priv);
static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{

View file

@ -422,6 +422,17 @@ out:
return ctx;
}
struct i915_gem_context *i915_gem_context_create_ipts(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
ctx = i915_gem_create_context(dev_priv, NULL);
return ctx;
}
int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;

View file

@ -331,7 +331,14 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc = __get_stage_desc(client);
memset(desc, 0, sizeof(*desc));
desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE;
if ((client->priority == GUC_CTX_PRIORITY_KMD_NORMAL) ||
(client->priority == GUC_CTX_PRIORITY_KMD_HIGH)) {
desc->attribute |= GUC_STAGE_DESC_ATTR_KERNEL;
} else {
desc->attribute |= GUC_STAGE_DESC_ATTR_PCH;
}
desc->stage_id = client->stage_id;
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
@ -1143,7 +1150,8 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
irqs = (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)
<< GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
/* These three registers have the same bit definitions */
I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
@ -1324,3 +1332,45 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/*
 * i915_guc_ipts_submission_enable - create the GuC client used for IPTS
 * @dev_priv: i915 device private
 * @ctx: the dedicated IPTS GPU context
 *
 * Allocates a normal-priority GuC client bound to @ctx across all engines,
 * publishes it as guc->ipts_client and brings up its doorbell.
 * Returns 0 on success or -ENOMEM when client allocation fails.
 */
int i915_guc_ipts_submission_enable(struct drm_i915_private *dev_priv,
			struct i915_gem_context *ctx)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;

	/* client for execbuf submission */
	client = guc_client_alloc(dev_priv,
				  INTEL_INFO(dev_priv)->ring_mask,
				  GUC_CTX_PRIORITY_NORMAL,
				  ctx);
	if (!client) {
		DRM_ERROR("Failed to create normal GuC client!\n");
		return -ENOMEM;
	}

	guc->ipts_client = client;
	/* Sync forcewake sampling with GuC before the client is used. */
	host2guc_sample_forcewake(guc, client);
	/* Program the doorbell hardware for the newly allocated client. */
	guc_init_doorbell_hw(guc);

	return 0;
}
/* Tear down the IPTS GuC client, if one was ever created. */
void i915_guc_ipts_submission_disable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client = guc->ipts_client;

	if (client) {
		guc->ipts_client = NULL;
		guc_client_free(dev_priv, client);
	}
}
/* Re-request the GuC doorbell for the IPTS client after it may have been
 * lost (e.g. across a backlight off/on cycle). */
void i915_guc_ipts_reacquire_doorbell(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret;

	ret = host2guc_allocate_doorbell(guc, guc->ipts_client);
	if (ret)
		DRM_ERROR("Not able to reacquire IPTS doorbell\n");
}

View file

@ -36,6 +36,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_ipts.h"
/**
* DOC: interrupt handling
@ -1368,6 +1369,9 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
tasklet |= i915.enable_guc_submission;
}
if (iir & (GT_RENDER_PIPECTL_NOTIFY_INTERRUPT << test_shift))
intel_ipts_notify_complete();
if (tasklet)
tasklet_hi_schedule(&engine->irq_tasklet);
}
@ -3423,7 +3427,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,

View file

@ -56,8 +56,8 @@ struct i915_params i915 __read_mostly = {
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
.enable_guc_loading = 0,
.enable_guc_submission = 0,
.enable_guc_loading = 2,
.enable_guc_submission = 2,
.guc_log_level = -1,
.guc_firmware_path = NULL,
.huc_firmware_path = NULL,
@ -221,12 +221,12 @@ MODULE_PARM_DESC(edp_vswing,
module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
MODULE_PARM_DESC(enable_guc_loading,
"Enable GuC firmware loading "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
"(-1=auto, 0=never, 1=if available, 2=required [default])");
module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
MODULE_PARM_DESC(enable_guc_submission,
"Enable GuC submission "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
"(-1=auto, 0=never, 1=if available, 2=required [default])");
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,

View file

@ -0,0 +1,621 @@
/*
* Copyright 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/intel_ipts_if.h>
#include <drm/drmP.h>
#include "i915_drv.h"
/* Highest intel_ipts_connect() interface version this driver speaks. */
#define SUPPORTED_IPTS_INTERFACE_VERSION	1

/* Doorbell-loss watchdog tuning (see reacquire_db_work_func). */
#define REACQUIRE_DB_THRESHOLD	8
#define DB_LOST_CHECK_STEP1_INTERVAL	2000 /* ms */
#define DB_LOST_CHECK_STEP2_INTERVAL	500 /* ms */

/* intel IPTS ctx for ipts support */
typedef struct intel_ipts {
	struct drm_device *dev;
	/* dedicated kernel GPU context all IPTS buffers are pinned into */
	struct i915_gem_context *ipts_context;
	/* callbacks registered by the ME-side IPTS driver on connect */
	intel_ipts_callback_t ipts_clbks;

	/* buffers' list */
	struct {
		spinlock_t lock;
		struct list_head list;
	} buffers;

	/* opaque cookie passed back to every callback */
	void *data;

	/* periodic watchdog that detects a lost GuC doorbell */
	struct delayed_work reacquire_db_work;
	/* GuC work-queue layout handed to the IPTS firmware */
	intel_ipts_wq_info_t wq_info;
	/* wq head/tail snapshot from the previous watchdog pass */
	u32 old_tail;
	u32 old_head;
	bool need_reacquire_db;

	bool connected;
	bool initialized;
} intel_ipts_t;

/* Single device-wide instance; also doubles as the opaque gfx_handle. */
intel_ipts_t intel_ipts;

/* One GEM-backed buffer handed out via intel_ipts_map_buffer(). */
typedef struct intel_ipts_object {
	struct list_head list;	/* link in intel_ipts.buffers.list */
	struct drm_i915_gem_object *gem_obj;
	void *cpu_addr;		/* kernel mapping, NULL when unmapped */
} intel_ipts_object_t;
/*
 * ipts_object_create - allocate a GEM-backed IPTS buffer and track it
 * @size: requested size in bytes (rounded up to page size)
 * @flags: IPTS_BUF_FLAG_* (CONTIGUOUS forces a physically contiguous backing)
 *
 * On success the object is added to intel_ipts.buffers.list.
 * Returns the new wrapper object, or NULL on any failure.
 */
static intel_ipts_object_t *ipts_object_create(size_t size, u32 flags)
{
	intel_ipts_object_t *obj;
	struct drm_i915_gem_object *gem_obj = NULL;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		goto err_out;

	/* Allocate the new object */
	gem_obj = i915_gem_object_create(intel_ipts.dev, size);
	if (IS_ERR(gem_obj)) {
		/* Fix: i915_gem_object_create() returns ERR_PTR, never NULL,
		 * so the original NULL check could pass an error pointer on. */
		gem_obj = NULL;
		goto err_out;
	}

	if (flags & IPTS_BUF_FLAG_CONTIGUOUS) {
		/* IPTS firmware needs physically contiguous memory here. */
		ret = i915_gem_object_attach_phys(gem_obj, PAGE_SIZE);
		if (ret) {
			pr_info(">> ipts no contiguous : %d\n", ret);
			goto err_out;
		}
	}

	obj->gem_obj = gem_obj;

	spin_lock(&intel_ipts.buffers.lock);
	list_add_tail(&obj->list, &intel_ipts.buffers.list);
	spin_unlock(&intel_ipts.buffers.lock);

	return obj;

err_out:
	if (gem_obj)
		i915_gem_free_object(&gem_obj->base);
	kfree(obj);
	return NULL;
}
/* Unlink @obj from the global buffer list and release the GEM object
 * and the wrapper. Caller is responsible for any prior unmap/unpin. */
static void ipts_object_free(intel_ipts_object_t* obj)
{
	spin_lock(&intel_ipts.buffers.lock);
	list_del(&obj->list);
	spin_unlock(&intel_ipts.buffers.lock);

	i915_gem_free_object(&obj->gem_obj->base);
	kfree(obj);
}
/*
 * ipts_object_pin - pin an IPTS buffer into the IPTS context address space
 * @obj: wrapper around the GEM object to pin
 * @ipts_ctx: context whose ppgtt is preferred; falls back to the GGTT
 *
 * Returns 0 on success or a negative errno.
 */
static int ipts_object_pin(intel_ipts_object_t *obj,
			   struct i915_gem_context *ipts_ctx)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_private *dev_priv = intel_ipts.dev->dev_private;

	if (ipts_ctx->ppgtt)
		vm = &ipts_ctx->ppgtt->base;
	else
		vm = &dev_priv->ggtt.base;

	vma = i915_gem_obj_lookup_or_create_vma(obj->gem_obj, vm, NULL);
	if (IS_ERR(vma)) {
		DRM_ERROR("cannot find or create vma\n");
		/* Fix: propagate the real errno instead of a bare -1,
		 * which is -EPERM and misleads callers. */
		return PTR_ERR(vma);
	}

	return i915_vma_pin(vma, 0, PAGE_SIZE, PIN_USER);
}
/* Counterpart of ipts_object_pin(); intentionally a no-op for now since
 * IPTS buffers stay pinned for their whole lifetime. */
static void ipts_object_unpin(intel_ipts_object_t *obj)
{
	/* TBD: Add support */
}
/* Map the backing pages into kernel space (write-back caching).
 * Returns the CPU address or an ERR_PTR from the pin_map call. */
static void* ipts_object_map(intel_ipts_object_t *obj)
{
	return i915_gem_object_pin_map(obj->gem_obj, I915_MAP_WB);
}
/* Drop the kernel mapping created by ipts_object_map(). */
static void ipts_object_unmap(intel_ipts_object_t* obj)
{
	i915_gem_object_unpin_map(obj->gem_obj);
	obj->cpu_addr = NULL;
}
/*
 * create_ipts_context - create and pin the dedicated IPTS GPU context
 *
 * Takes struct_mutex, creates a kernel context, allocates and pins its
 * logical ring state on the render engine, then publishes it in
 * intel_ipts.ipts_context and initializes the buffer tracking list.
 * Returns 0 on success or a negative errno.
 */
static int create_ipts_context(void)
{
	struct i915_gem_context *ipts_ctx = NULL;
	struct drm_i915_private *dev_priv = intel_ipts.dev->dev_private;
	int ret = 0;

	/* Initialize the context right away.*/
	ret = i915_mutex_lock_interruptible(intel_ipts.dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed \n");
		return ret;
	}

	ipts_ctx = i915_gem_context_create_ipts(dev_priv);
	if (IS_ERR(ipts_ctx)) {
		DRM_ERROR("Failed to create IPTS context (error %ld)\n",
			  PTR_ERR(ipts_ctx));
		ret = PTR_ERR(ipts_ctx);
		goto err_unlock;
	}

	/* Back the context with logical ring state on RCS before first use. */
	ret = execlists_context_deferred_alloc(ipts_ctx, dev_priv->engine[RCS]);
	if (ret) {
		DRM_DEBUG("lr context allocation failed : %d\n", ret);
		goto err_ctx;
	}

	/* Keep the context pinned for the driver's lifetime; dropped in
	 * destroy_ipts_context(). */
	ret = execlists_context_pin(dev_priv->engine[RCS], ipts_ctx);
	if (ret) {
		DRM_DEBUG("lr context pinning failed : %d\n", ret);
		goto err_ctx;
	}

	/* Release the mutex */
	mutex_unlock(&intel_ipts.dev->struct_mutex);

	spin_lock_init(&intel_ipts.buffers.lock);
	INIT_LIST_HEAD(&intel_ipts.buffers.list);

	intel_ipts.ipts_context = ipts_ctx;

	return 0;

err_ctx:
	if (ipts_ctx)
		i915_gem_context_put(ipts_ctx);
err_unlock:
	mutex_unlock(&intel_ipts.dev->struct_mutex);
	return ret;
}
/*
 * destroy_ipts_context - unpin and release the dedicated IPTS context
 *
 * Counterpart of create_ipts_context(). NOTE(review): bails out without
 * cleanup if taking struct_mutex is interrupted by a signal.
 */
static void destroy_ipts_context(void)
{
	struct i915_gem_context *ipts_ctx = NULL;
	struct drm_i915_private *dev_priv = intel_ipts.dev->dev_private;
	int ret = 0;

	ipts_ctx = intel_ipts.ipts_context;

	/* Initialize the context right away.*/
	ret = i915_mutex_lock_interruptible(intel_ipts.dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed \n");
		return;
	}

	/* Drop the pin taken at creation, then the creation reference. */
	execlists_context_unpin(dev_priv->engine[RCS], ipts_ctx);
	i915_gem_context_put(ipts_ctx);

	mutex_unlock(&intel_ipts.dev->struct_mutex);
}
/* Forward a render-pipe-control completion to the connected IPTS driver.
 * Always returns 0; silently ignored when no callback is registered. */
int intel_ipts_notify_complete(void)
{
	void (*complete_cb)(void *) = intel_ipts.ipts_clbks.workload_complete;

	if (complete_cb)
		complete_cb(intel_ipts.data);

	return 0;
}
/*
 * Forward a panel backlight transition to the connected IPTS driver and
 * start/stop the doorbell watchdog accordingly. Always returns 0.
 */
int intel_ipts_notify_backlight_status(bool backlight_on)
{
	void (*notify)(u32, void *) = intel_ipts.ipts_clbks.notify_gfx_status;

	if (!notify)
		return 0;

	if (backlight_on) {
		notify(IPTS_NOTIFY_STA_BACKLIGHT_ON, intel_ipts.data);
		/* Touch traffic resumes with the screen: watch the doorbell. */
		schedule_delayed_work(&intel_ipts.reacquire_db_work,
			msecs_to_jiffies(DB_LOST_CHECK_STEP1_INTERVAL));
	} else {
		notify(IPTS_NOTIFY_STA_BACKLIGHT_OFF, intel_ipts.data);
		cancel_delayed_work(&intel_ipts.reacquire_db_work);
	}

	return 0;
}
/* Take struct_mutex and ask the GuC to hand the IPTS doorbell back. */
static void intel_ipts_reacquire_db(intel_ipts_t *intel_ipts_p)
{
	int ret;

	ret = i915_mutex_lock_interruptible(intel_ipts_p->dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed \n");
		return;
	}

	/* Reacquire the doorbell */
	i915_guc_ipts_reacquire_doorbell(intel_ipts_p->dev->dev_private);
	mutex_unlock(&intel_ipts_p->dev->struct_mutex);
}
/*
 * Callback handed to the IPTS (ME) driver: copy out the GuC work-queue
 * layout, reacquire our doorbell and start the doorbell watchdog.
 * Returns 0, or -EINVAL for a foreign gfx_handle.
 */
static int intel_ipts_get_wq_info(uint64_t gfx_handle,
					intel_ipts_wq_info_t *wq_info)
{
	if (gfx_handle != (uint64_t)&intel_ipts) {
		DRM_ERROR("invalid gfx handle\n");
		return -EINVAL;
	}

	*wq_info = intel_ipts.wq_info;

	intel_ipts_reacquire_db(&intel_ipts);
	/* Periodically check that the doorbell has not been lost. */
	schedule_delayed_work(&intel_ipts.reacquire_db_work,
				msecs_to_jiffies(DB_LOST_CHECK_STEP1_INTERVAL));

	return 0;
}
/*
 * set_wq_info - capture GuC work-queue addresses for the IPTS firmware
 *
 * Fills intel_ipts.wq_info with kernel-virtual and physical addresses of
 * the IPTS client's doorbell, work queue and head/tail pointers, and
 * programs the process descriptor's base addresses.
 * Returns 0, or -EINVAL when no IPTS GuC client exists.
 */
static int set_wq_info(void)
{
	struct drm_i915_private *dev_priv = intel_ipts.dev->dev_private;
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;
	struct guc_process_desc *desc;
	void *base = NULL;
	intel_ipts_wq_info_t *wq_info;
	u64 phy_base = 0;

	wq_info = &intel_ipts.wq_info;

	client = guc->ipts_client;
	if (!client) {
		DRM_ERROR("IPTS GuC client is NOT available\n");
		return -EINVAL;
	}

	base = client->client_base;
	desc = (struct guc_process_desc *)((u64)base + client->proc_desc_offset);

	desc->wq_base_addr = (u64)base + client->wq_offset;
	desc->db_base_addr = (u64)base + client->doorbell_offset;

	/* IPTS expects physical addresses to pass it to ME */
	phy_base = sg_dma_address(client->vma->pages->sgl);

	wq_info->db_addr = desc->db_base_addr;
	wq_info->db_phy_addr = phy_base + client->doorbell_offset;
	wq_info->db_cookie_offset = offsetof(struct guc_doorbell_info, cookie);
	wq_info->wq_addr = desc->wq_base_addr;
	wq_info->wq_phy_addr = phy_base + client->wq_offset;
	wq_info->wq_head_addr = (u64)&desc->head;
	wq_info->wq_head_phy_addr = phy_base + client->proc_desc_offset +
					offsetof(struct guc_process_desc, head);
	wq_info->wq_tail_addr = (u64)&desc->tail;
	wq_info->wq_tail_phy_addr = phy_base + client->proc_desc_offset +
					offsetof(struct guc_process_desc, tail);
	wq_info->wq_size = desc->wq_size_bytes;

	return 0;
}
/*
 * intel_ipts_init_wq - (re)enable GuC submission for IPTS
 *
 * Under struct_mutex: tears down any stale IPTS client, creates a fresh
 * one bound to the IPTS context and records its work-queue layout.
 * Returns 0 on success or a negative errno.
 */
static int intel_ipts_init_wq(void)
{
	int ret = 0;

	ret = i915_mutex_lock_interruptible(intel_ipts.dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed\n");
		return ret;
	}

	/* disable IPTS submission */
	i915_guc_ipts_submission_disable(intel_ipts.dev->dev_private);

	/* enable IPTS submission */
	ret = i915_guc_ipts_submission_enable(intel_ipts.dev->dev_private,
						intel_ipts.ipts_context);
	if (ret) {
		DRM_ERROR("i915_guc_ipts_submission_enable failed : %d\n", ret);
		goto out;
	}

	ret = set_wq_info();
	if (ret) {
		DRM_ERROR("set_wq_info failed\n");
		goto out;
	}

out:
	mutex_unlock(&intel_ipts.dev->struct_mutex);
	return ret;
}
/* Counterpart of intel_ipts_init_wq(): drop the IPTS GuC client under
 * struct_mutex. Bails out if the interruptible lock is interrupted. */
static void intel_ipts_release_wq(void)
{
	int ret = 0;

	ret = i915_mutex_lock_interruptible(intel_ipts.dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed\n");
		return;
	}

	/* disable IPTS submission */
	i915_guc_ipts_submission_disable(intel_ipts.dev->dev_private);

	mutex_unlock(&intel_ipts.dev->struct_mutex);
}
/*
 * intel_ipts_map_buffer - allocate and map a GEM buffer for the IPTS driver
 * @gfx_handle: opaque handle handed out by intel_ipts_connect()
 * @mapbuf: in/out descriptor; size/flags in, CPU/GPU/phys addresses out
 *
 * Creates a tracked GEM object, pins it into the IPTS context address
 * space and fills @mapbuf. Returns 0 on success or a negative errno.
 */
static int intel_ipts_map_buffer(u64 gfx_handle, intel_ipts_mapbuffer_t *mapbuf)
{
	intel_ipts_object_t *obj;
	struct i915_gem_context *ipts_ctx = NULL;
	struct drm_i915_private *dev_priv = intel_ipts.dev->dev_private;
	struct i915_address_space *vm = NULL;
	struct i915_vma *vma = NULL;
	int ret;

	if (gfx_handle != (uint64_t)&intel_ipts) {
		DRM_ERROR("invalid gfx handle\n");
		return -EINVAL;
	}

	/* Acquire mutex first */
	ret = i915_mutex_lock_interruptible(intel_ipts.dev);
	if (ret) {
		DRM_ERROR("i915_mutex_lock_interruptible failed \n");
		return ret;
	}

	obj = ipts_object_create(mapbuf->size, mapbuf->flags);
	if (!obj) {
		/* Fix: original returned here while still holding
		 * struct_mutex, deadlocking all later GEM users. */
		ret = -ENOMEM;
		goto err_unlock;
	}

	ipts_ctx = intel_ipts.ipts_context;
	ret = ipts_object_pin(obj, ipts_ctx);
	if (ret) {
		DRM_ERROR("Not able to pin iTouch obj\n");
		goto err_free;
	}

	if (mapbuf->flags & IPTS_BUF_FLAG_CONTIGUOUS)
		obj->cpu_addr = obj->gem_obj->phys_handle->vaddr;
	else
		obj->cpu_addr = ipts_object_map(obj);

	if (ipts_ctx->ppgtt)
		vm = &ipts_ctx->ppgtt->base;
	else
		vm = &dev_priv->ggtt.base;

	vma = i915_gem_obj_lookup_or_create_vma(obj->gem_obj, vm, NULL);
	if (IS_ERR(vma)) {
		DRM_ERROR("cannot find or create vma\n");
		/* Fix: original leaked both the object and struct_mutex
		 * on this path. */
		ret = PTR_ERR(vma);
		goto err_unmap;
	}

	mapbuf->gfx_addr = (void *)vma->node.start;
	mapbuf->cpu_addr = (void *)obj->cpu_addr;
	mapbuf->buf_handle = (u64)obj;
	if (mapbuf->flags & IPTS_BUF_FLAG_CONTIGUOUS)
		mapbuf->phy_addr = (u64)obj->gem_obj->phys_handle->busaddr;

	/* Release the mutex */
	mutex_unlock(&intel_ipts.dev->struct_mutex);

	return 0;

err_unmap:
	if (!obj->gem_obj->phys_handle)
		ipts_object_unmap(obj);
	ipts_object_unpin(obj);
err_free:
	ipts_object_free(obj);
err_unlock:
	mutex_unlock(&intel_ipts.dev->struct_mutex);
	return ret;
}
/*
 * Callback handed to the IPTS driver: release a buffer previously
 * returned by intel_ipts_map_buffer(). @buf_handle is the wrapper
 * pointer that map_buffer stored in mapbuf->buf_handle.
 */
static int intel_ipts_unmap_buffer(uint64_t gfx_handle, uint64_t buf_handle)
{
	intel_ipts_object_t* obj = (intel_ipts_object_t*)buf_handle;

	if (gfx_handle != (uint64_t)&intel_ipts) {
		DRM_ERROR("invalid gfx handle\n");
		return -EINVAL;
	}

	/* Contiguous (phys-attached) buffers were never kmap'ed. */
	if (!obj->gem_obj->phys_handle)
		ipts_object_unmap(obj);
	ipts_object_unpin(obj);
	ipts_object_free(obj);

	return 0;
}
/**
 * intel_ipts_connect - handshake entry point for the IPTS (ME) driver
 * @ipts_connect: negotiation block; callbacks + version in, ops table,
 *                gfx version and opaque handle out
 *
 * Returns 0 on success, -EIO when i915-side IPTS support never came up,
 * -EINVAL on NULL argument or unsupported interface version.
 */
int intel_ipts_connect(intel_ipts_connect_t *ipts_connect)
{
	int ret = 0;

	if (!intel_ipts.initialized)
		return -EIO;

	if (ipts_connect && ipts_connect->if_version <=
					SUPPORTED_IPTS_INTERFACE_VERSION) {
		/* return gpu operations for ipts */
		ipts_connect->ipts_ops.get_wq_info = intel_ipts_get_wq_info;
		ipts_connect->ipts_ops.map_buffer = intel_ipts_map_buffer;
		ipts_connect->ipts_ops.unmap_buffer = intel_ipts_unmap_buffer;
		ipts_connect->gfx_version = INTEL_INFO(intel_ipts.dev)->gen;
		/* The global state struct's address doubles as the handle. */
		ipts_connect->gfx_handle = (uint64_t)&intel_ipts;

		/* save callback and data */
		intel_ipts.data = ipts_connect->data;
		intel_ipts.ipts_clbks = ipts_connect->ipts_cb;

		intel_ipts.connected = true;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(intel_ipts_connect);
/**
 * intel_ipts_disconnect - tear down the IPTS driver connection
 * @gfx_handle: handle returned by intel_ipts_connect()
 *
 * Clears the registered callbacks and marks the link disconnected.
 */
void intel_ipts_disconnect(uint64_t gfx_handle)
{
	if (!intel_ipts.initialized)
		return;

	/* Reject foreign handles and double-disconnects alike. */
	if (gfx_handle != (uint64_t)&intel_ipts ||
				intel_ipts.connected == false) {
		DRM_ERROR("invalid gfx handle\n");
		return;
	}

	intel_ipts.data = 0;
	memset(&intel_ipts.ipts_clbks, 0, sizeof(intel_ipts_callback_t));

	intel_ipts.connected = false;
}
EXPORT_SYMBOL_GPL(intel_ipts_disconnect);
/*
 * Doorbell watchdog: infer a lost GuC doorbell from work-queue stalls.
 * If the queue holds >= REACQUIRE_DB_THRESHOLD entries and head/tail do
 * not move between two consecutive checks, the doorbell is assumed lost
 * and is reacquired. Reschedules itself while the backlight is on.
 */
static void reacquire_db_work_func(struct work_struct *work)
{
	struct delayed_work *d_work = container_of(work, struct delayed_work,
							work);
	intel_ipts_t *intel_ipts_p = container_of(d_work, intel_ipts_t,
							reacquire_db_work);
	u32 head;
	u32 tail;
	u32 size;
	u32 load;

	head = *(u32*)intel_ipts_p->wq_info.wq_head_addr;
	tail = *(u32*)intel_ipts_p->wq_info.wq_tail_addr;
	size = intel_ipts_p->wq_info.wq_size;

	/* Outstanding entries in the ring, accounting for wrap-around. */
	if (head >= tail)
		load = head - tail;
	else
		load = head + size - tail;

	if (load < REACQUIRE_DB_THRESHOLD) {
		intel_ipts_p->need_reacquire_db = false;
		goto reschedule_work;
	}

	if (intel_ipts_p->need_reacquire_db) {
		/* Second pass with no progress: doorbell likely lost. */
		if (intel_ipts_p->old_head == head && intel_ipts_p->old_tail == tail)
			intel_ipts_reacquire_db(intel_ipts_p);
		intel_ipts_p->need_reacquire_db = false;
	} else {
		/* First pass over threshold: snapshot and re-check sooner. */
		intel_ipts_p->old_head = head;
		intel_ipts_p->old_tail = tail;
		intel_ipts_p->need_reacquire_db = true;

		/* recheck */
		schedule_delayed_work(&intel_ipts_p->reacquire_db_work,
			msecs_to_jiffies(DB_LOST_CHECK_STEP2_INTERVAL));
		return;
	}

reschedule_work:
	schedule_delayed_work(&intel_ipts_p->reacquire_db_work,
		msecs_to_jiffies(DB_LOST_CHECK_STEP1_INTERVAL));
}
/**
 * intel_ipts_init - Initialize ipts support
 * @dev: drm device
 *
 * Setup the required structures for ipts: the dedicated GPU context,
 * the GuC work queue and the doorbell watchdog.
 * Returns 0 on success or a negative errno.
 */
int intel_ipts_init(struct drm_device *dev)
{
	int ret;

	intel_ipts.dev = dev;
	INIT_DELAYED_WORK(&intel_ipts.reacquire_db_work, reacquire_db_work_func);

	ret = create_ipts_context();
	if (ret)
		/* Fix: propagate the real errno instead of forcing -ENOMEM. */
		return ret;

	ret = intel_ipts_init_wq();
	if (ret) {
		/* Fix: original leaked the pinned IPTS context here. */
		destroy_ipts_context();
		return ret;
	}

	intel_ipts.initialized = true;
	DRM_DEBUG_DRIVER("Intel iTouch framework initialized\n");

	return 0;
}
/**
 * intel_ipts_cleanup - tear down ipts support
 * @dev: drm device; ignored unless it matches the device we initialized
 *
 * Frees any buffers the IPTS driver never unmapped, disables GuC
 * submission, releases the IPTS context and stops the watchdog.
 */
void intel_ipts_cleanup(struct drm_device *dev)
{
	intel_ipts_object_t *obj, *n;

	if (intel_ipts.dev == dev) {
		list_for_each_entry_safe(obj, n, &intel_ipts.buffers.list, list) {
			list_del(&obj->list);

			/* Contiguous buffers were never kmap'ed. */
			if (!obj->gem_obj->phys_handle)
				ipts_object_unmap(obj);
			ipts_object_unpin(obj);
			i915_gem_free_object(&obj->gem_obj->base);
			kfree(obj);
		}

		intel_ipts_release_wq();
		destroy_ipts_context();
		cancel_delayed_work(&intel_ipts.reacquire_db_work);
	}
}

View file

@ -0,0 +1,34 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _INTEL_IPTS_H_
#define _INTEL_IPTS_H_

struct drm_device;

/* Lifecycle: called from i915 driver load/unload when GuC submission
 * is enabled on gen9+. */
int intel_ipts_init(struct drm_device *dev);
void intel_ipts_cleanup(struct drm_device *dev);
/* Event forwarding into the IPTS core: panel backlight transitions and
 * render pipe-control (workload completion) notifications. */
int intel_ipts_notify_backlight_status(bool backlight_on);
int intel_ipts_notify_complete(void);

#endif //_INTEL_IPTS_H_

View file

@ -735,7 +735,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
/* XXX Do we need to preempt to make room for us and our deps? */
}
static int execlists_context_pin(struct intel_engine_cs *engine,
int execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
@ -794,7 +794,7 @@ err:
return ret;
}
static void execlists_context_unpin(struct intel_engine_cs *engine,
void execlists_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
@ -1683,7 +1683,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
int ret;
logical_ring_setup(engine);
engine->irq_keep_mask |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT;
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
@ -1944,7 +1945,7 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
return ret;
}
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;

View file

@ -78,8 +78,13 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
struct drm_i915_private;
struct i915_gem_context;
int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
int execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
void execlists_context_unpin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);

View file

@ -34,6 +34,7 @@
#include <linux/moduleparam.h>
#include <linux/pwm.h>
#include "intel_drv.h"
#include "intel_ipts.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
@ -712,6 +713,9 @@ static void lpt_disable_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
if (INTEL_GEN(connector->base.dev) >= 9 && i915.enable_guc_submission)
intel_ipts_notify_backlight_status(false);
intel_panel_actually_set_backlight(connector, 0);
/*
@ -881,6 +885,9 @@ static void lpt_enable_backlight(struct intel_connector *connector)
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(connector, panel->backlight.level);
if (INTEL_GEN(connector->base.dev) >= 9 && i915.enable_guc_submission)
intel_ipts_notify_backlight_status(true);
}
static void pch_enable_backlight(struct intel_connector *connector)

View file

@ -189,6 +189,7 @@ struct intel_guc {
struct ida stage_ids;
struct i915_guc_client *execbuf_client;
struct i915_guc_client *ipts_client;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
@ -267,4 +268,9 @@ void intel_huc_select_fw(struct intel_huc *huc);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
int i915_guc_ipts_submission_enable(struct drm_i915_private *dev_priv,
struct i915_gem_context *ctx);
void i915_guc_ipts_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_ipts_reacquire_doorbell(struct drm_i915_private *dev_priv);
#endif

View file

@ -0,0 +1,75 @@
/*
*
* GFX interface to support Intel Precise Touch & Stylus
* Copyright (c) 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#ifndef INTEL_IPTS_IF_H
#define INTEL_IPTS_IF_H

/* Interface versions understood by intel_ipts_connect(). */
enum {
	IPTS_INTERFACE_V1 = 1,
};

/* Request a physically contiguous buffer (passed straight to the ME). */
#define IPTS_BUF_FLAG_CONTIGUOUS 0x01

/* Status codes delivered through the notify_gfx_status callback. */
#define IPTS_NOTIFY_STA_BACKLIGHT_OFF 0x00
#define IPTS_NOTIFY_STA_BACKLIGHT_ON 0x01

/* Descriptor for one buffer mapped via ipts_ops.map_buffer:
 * size/flags are inputs, the addresses and handle are outputs. */
typedef struct intel_ipts_mapbuffer {
	u32 size;
	u32 flags;
	void *gfx_addr;
	void *cpu_addr;
	u64 buf_handle;
	u64 phy_addr;
} intel_ipts_mapbuffer_t;

/* GuC work-queue layout exported to the IPTS firmware driver. */
typedef struct intel_ipts_wq_info {
	u64 db_addr;
	u64 db_phy_addr;
	u32 db_cookie_offset;
	u32 wq_size;
	u64 wq_addr;
	u64 wq_phy_addr;
	u64 wq_head_addr;	/* head of wq is managed by GPU */
	u64 wq_head_phy_addr;	/* head of wq is managed by GPU */
	u64 wq_tail_addr;	/* tail of wq is managed by CSME */
	u64 wq_tail_phy_addr;	/* tail of wq is managed by CSME */
} intel_ipts_wq_info_t;

/* GPU operations i915 exposes to the IPTS driver on connect. */
typedef struct intel_ipts_ops {
	int (*get_wq_info)(uint64_t gfx_handle, intel_ipts_wq_info_t *wq_info);
	int (*map_buffer)(uint64_t gfx_handle, intel_ipts_mapbuffer_t *mapbuffer);
	int (*unmap_buffer)(uint64_t gfx_handle, uint64_t buf_handle);
} intel_ipts_ops_t;

/* Callbacks the IPTS driver registers with i915. */
typedef struct intel_ipts_callback {
	void (*workload_complete)(void *data);
	void (*notify_gfx_status)(u32 status, void *data);
} intel_ipts_callback_t;

/* Handshake block exchanged in intel_ipts_connect(). */
typedef struct intel_ipts_connect {
	intel_ipts_callback_t ipts_cb;	/* input : callback addresses */
	void *data;			/* input : callback data */
	u32 if_version;			/* input : interface version */

	u32 gfx_version;		/* output : gfx version */
	u64 gfx_handle;			/* output : gfx handle */
	intel_ipts_ops_t ipts_ops;	/* output : gfx ops for IPTS */
} intel_ipts_connect_t;

int intel_ipts_connect(intel_ipts_connect_t *ipts_connect);
void intel_ipts_disconnect(uint64_t gfx_handle);

#endif // INTEL_IPTS_IF_H