diff --git a/configs/surface-5.8.config b/configs/surface-5.8.config
new file mode 100644
index 000000000..7e194ae7e
--- /dev/null
+++ b/configs/surface-5.8.config
@@ -0,0 +1,32 @@
+#
+# Surface Aggregator Module
+#
+# GPIO sysfs interface is required for SURFACE_SAM_HPS
+CONFIG_GPIO_SYSFS=y
+CONFIG_SURFACE_SAM=m
+CONFIG_SURFACE_SAM_SSH=m
+CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION=n
+CONFIG_SURFACE_SAM_DEBUGFS=m
+CONFIG_SURFACE_SAM_SAN=m
+CONFIG_SURFACE_SAM_VHF=m
+CONFIG_SURFACE_SAM_DTX=m
+CONFIG_SURFACE_SAM_HPS=m
+CONFIG_SURFACE_SAM_SID=m
+CONFIG_SURFACE_SAM_SID_GPELID=m
+CONFIG_SURFACE_SAM_SID_PERFMODE=m
+CONFIG_SURFACE_SAM_SID_VHF=m
+CONFIG_SURFACE_SAM_SID_POWER=m
+
+
+#
+# IPTS touchscreen
+#
+CONFIG_TOUCHSCREEN_IPTS=m
+
+#
+# Other Drivers
+#
+CONFIG_INPUT_SOC_BUTTON_ARRAY=m
+CONFIG_SURFACE_3_POWER_OPREGION=m
+CONFIG_SURFACE_3_BUTTON=m
+CONFIG_SURFACE_PRO3_BUTTON=m
diff --git a/patches/5.8/0001-surface3-oemb.patch b/patches/5.8/0001-surface3-oemb.patch
new file mode 100644
index 000000000..1d9aa407c
--- /dev/null
+++ b/patches/5.8/0001-surface3-oemb.patch
@@ -0,0 +1,71 @@
+From 0ea332f8971bbe537bbc43bb76ef790981ce001d Mon Sep 17 00:00:00 2001
+From: Chih-Wei Huang
+Date: Tue, 18 Sep 2018 11:01:37 +0800
+Subject: [PATCH 1/5] surface3-oemb
+
+---
+ drivers/platform/x86/surface3-wmi.c               | 7 +++++++
+ sound/soc/codecs/rt5645.c                         | 9 +++++++++
+ sound/soc/intel/common/soc-acpi-intel-cht-match.c | 8 ++++++++
+ 3 files changed, 24 insertions(+)
+
+diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
+index 130b6f52a6001..801083aa56d6d 100644
+--- a/drivers/platform/x86/surface3-wmi.c
++++ b/drivers/platform/x86/surface3-wmi.c
+@@ -37,6 +37,13 @@ static const struct dmi_system_id surface3_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index e2e1d5b03b381..5ff38592ba6db 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -3687,6 +3687,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
+ 		},
+ 		.driver_data = (void *)&intel_braswell_platform_data,
+ 	},
++	{
++		.ident = "Microsoft Surface 3",
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++		.driver_data = (void *)&intel_braswell_platform_data,
++	},
+ 	{
+ 		/*
+ 		 * Match for the GPDwin which unfortunately uses somewhat
+diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+index 2752dc9557334..ef36a316e2ed6 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
+@@ -27,6 +27,14 @@ static const struct dmi_system_id cht_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ 		},
+ 	},
++	{
++		.callback = cht_surface_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
++			DMI_MATCH(DMI_SYS_VENDOR, "OEMB"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OEMB"),
++		},
++	},
+ 	{ }
+ };
+ 
+-- 
+2.28.0
+
diff --git a/patches/5.8/0002-surface-sam.patch b/patches/5.8/0002-surface-sam.patch
new file mode 100644
index 000000000..e88dc382d
--- /dev/null
+++ b/patches/5.8/0002-surface-sam.patch
@@ -0,0 +1,12611 @@
+From 0be98f240951d46cc8871a777d47a7d502d03305
Mon Sep 17 00:00:00 2001 +From: Maximilian Luz +Date: Fri, 6 Dec 2019 11:56:12 +0100 +Subject: [PATCH 2/5] surface-sam + +--- + drivers/platform/x86/Kconfig | 2 + + drivers/platform/x86/Makefile | 1 + + drivers/platform/x86/surface_sam/Kconfig | 176 + + drivers/platform/x86/surface_sam/Makefile | 16 + + .../x86/surface_sam/surface_sam_debugfs.c | 270 + + .../x86/surface_sam/surface_sam_dtx.c | 582 ++ + .../x86/surface_sam/surface_sam_hps.c | 1287 ++++ + .../x86/surface_sam/surface_sam_san.c | 930 +++ + .../x86/surface_sam/surface_sam_san.h | 30 + + .../x86/surface_sam/surface_sam_sid.c | 283 + + .../x86/surface_sam/surface_sam_sid_gpelid.c | 232 + + .../surface_sam/surface_sam_sid_perfmode.c | 214 + + .../x86/surface_sam/surface_sam_sid_power.c | 1054 ++++ + .../x86/surface_sam/surface_sam_sid_power.h | 16 + + .../x86/surface_sam/surface_sam_sid_vhf.c | 429 ++ + .../x86/surface_sam/surface_sam_sid_vhf.h | 14 + + .../x86/surface_sam/surface_sam_ssh.c | 5329 +++++++++++++++++ + .../x86/surface_sam/surface_sam_ssh.h | 717 +++ + .../x86/surface_sam/surface_sam_ssh_trace.h | 587 ++ + .../x86/surface_sam/surface_sam_vhf.c | 266 + + 20 files changed, 12435 insertions(+) + create mode 100644 drivers/platform/x86/surface_sam/Kconfig + create mode 100644 drivers/platform/x86/surface_sam/Makefile + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_debugfs.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_dtx.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_hps.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_san.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_power.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.c + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_vhf.c + +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig +index 0581a54cf562f..998007444059b 100644 +--- a/drivers/platform/x86/Kconfig ++++ b/drivers/platform/x86/Kconfig +@@ -1376,6 +1376,8 @@ config INTEL_SCU_PLATFORM + and SCU (sometimes called PMC as well). The driver currently + supports Intel Elkhart Lake and compatible platforms. 
+ ++source "drivers/platform/x86/surface_sam/Kconfig" ++ + config INTEL_SCU_IPC_UTIL + tristate "Intel SCU IPC utility driver" + depends on INTEL_SCU +diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile +index 2b85852a1a872..e154e679db453 100644 +--- a/drivers/platform/x86/Makefile ++++ b/drivers/platform/x86/Makefile +@@ -148,3 +148,4 @@ obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ + intel_telemetry_pltdrv.o \ + intel_telemetry_debugfs.o + obj-$(CONFIG_PMC_ATOM) += pmc_atom.o ++obj-$(CONFIG_SURFACE_SAM) += surface_sam/ +diff --git a/drivers/platform/x86/surface_sam/Kconfig b/drivers/platform/x86/surface_sam/Kconfig +new file mode 100644 +index 0000000000000..b5bb55248a5d5 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/Kconfig +@@ -0,0 +1,176 @@ ++menuconfig SURFACE_SAM ++ depends on ACPI ++ tristate "Microsoft Surface/System Aggregator Module and Platform Drivers" ++ help ++ Drivers for the Surface/System Aggregator Module (SAM) of Microsoft ++ Surface devices. ++ ++ SAM is an embedded controller that provides access to various ++ functionalities on these devices, including battery status, keyboard ++ events (on the Laptops) and many more. ++ ++ Say M/Y here if you have a Microsoft Surface device with a SAM device ++ (i.e. 5th generation or later). ++ ++config SURFACE_SAM_SSH ++ tristate "Surface Serial Hub Driver" ++ depends on SURFACE_SAM ++ depends on SERIAL_DEV_CTRL_TTYPORT ++ select CRC_CCITT ++ default m ++ help ++ Surface Serial Hub driver for 5th generation (or later) Microsoft ++ Surface devices. ++ ++ This is the base driver for the embedded serial controller found on ++ 5th generation (and later) Microsoft Surface devices (e.g. Book 2, ++ Laptop, Laptop 2, Pro 2017, Pro 6, ...). This driver itself only ++ provides access to the embedded controller (SAM) and subsequent ++ drivers are required for the respective functionalities. ++ ++ If you have a 5th generation (or later) Microsoft Surface device, say ++ Y or M here. ++ ++config SURFACE_SAM_SSH_ERROR_INJECTION ++ bool "Surface Serial Hub Error Injection Capabilities" ++ depends on SURFACE_SAM_SSH ++ depends on FUNCTION_ERROR_INJECTION ++ default n ++ help ++ Enable error injection capabilities for the Surface Serial Hub. ++ This is used to debug the driver, specifically the communication ++ interface. It is not required for normal use. ++ ++ If you are not sure, say N here. ++ ++config SURFACE_SAM_DEBUGFS ++ tristate "Surface Serial Hub Debug Device" ++ depends on SURFACE_SAM_SSH ++ depends on DEBUG_FS ++ default n ++ help ++ Debug device for direct communication with the embedded controller ++ found on 5th generation (and later) Microsoft Surface devices (e.g. ++ Book 2, Laptop, Laptop 2, Pro 2017, Pro 6, ...) via debugfs. ++ ++ If you are not sure, say N here. ++ ++config SURFACE_SAM_SAN ++ tristate "Surface ACPI Notify Driver" ++ depends on SURFACE_SAM_SSH ++ default m ++ help ++ Surface ACPI Notify driver for 5th generation (or later) Microsoft ++ Surface devices. ++ ++ This driver enables basic ACPI events and requests, such as battery ++ status requests/events, thermal events, lid status, and possibly more, ++ which would otherwise not work on these devices. ++ ++ If you are not sure, say M here. ++ ++config SURFACE_SAM_VHF ++ tristate "Surface Virtual HID Framework Driver" ++ depends on SURFACE_SAM_SSH ++ depends on HID ++ default m ++ help ++ Surface Virtual HID Framework driver for 5th generation (or later) ++ Microsoft Surface devices. 
++
++	  This driver provides support for the Microsoft Virtual HID framework,
++	  which is required for keyboard support on the Surface Laptop 1 and 2.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_DTX
++	tristate "Surface Detachment System (DTX) Driver"
++	depends on SURFACE_SAM_SSH
++	depends on INPUT
++	default m
++	help
++	  Surface Detachment System (DTX) driver for the Microsoft Surface Book
++	  2. This driver provides support for proper detachment handling in
++	  user-space, status events relating to the base, and the safe-guard
++	  that keeps the base attached while the discrete GPU contained in it
++	  is running, all via the special /dev/surface_dtx device.
++
++	  Also provides a standard input device to provide SW_TABLET_MODE events
++	  upon device mode change.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_HPS
++	tristate "Surface dGPU Hot-Plug System (dGPU-HPS) Driver"
++	depends on SURFACE_SAM_SSH
++	depends on SURFACE_SAM_SAN
++	depends on GPIO_SYSFS
++	default m
++	help
++	  Driver to properly handle hot-plugging and explicit power-on/power-off
++	  of the discrete GPU (dGPU) on the Surface Book 2 and 3.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_SID
++	tristate "Surface Platform Integration Driver"
++	depends on SURFACE_SAM_SSH
++	default m
++	help
++	  Surface Platform Integration Driver for the Microsoft Surface Devices.
++	  This driver loads various model-specific sub-drivers, including
++	  battery and keyboard support on 7th generation Surface devices, proper
++	  lid setup to enable device wakeup when the lid is opened on multiple
++	  models, as well as performance mode setting support on the Surface
++	  Book 2.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_SID_GPELID
++	tristate "Surface Lid Wakeup Driver"
++	depends on SURFACE_SAM_SID
++	default m
++	help
++	  Driver to set up device wake-up via lid on Intel-based Microsoft
++	  Surface devices. These devices do not wake up from sleep as their GPE
++	  interrupt is not configured automatically. This driver solves that
++	  problem.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_SID_PERFMODE
++	tristate "Surface Performance Mode Driver"
++	depends on SURFACE_SAM_SID
++	depends on SYSFS
++	default m
++	help
++	  This driver provides support for setting performance-modes on Surface
++	  devices via the perf_mode sysfs attribute. Currently only supports the
++	  Surface Book 2. Performance-modes directly influence the fan-profile
++	  of the device, allowing the user to choose between higher performance
++	  or quieter operation.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_SID_VHF
++	tristate "Surface SAM HID Driver"
++	depends on SURFACE_SAM_SID
++	depends on HID
++	default m
++	help
++	  This driver provides support for HID devices connected via the Surface
++	  SAM embedded controller. It provides support for keyboard and touchpad
++	  on the Surface Laptop 3 models.
++
++	  If you are not sure, say M here.
++
++config SURFACE_SAM_SID_POWER
++	tristate "Surface SAM Battery/AC Driver"
++	depends on SURFACE_SAM_SID
++	select POWER_SUPPLY
++	default m
++	help
++	  This driver provides support for the battery and AC on 7th generation
++	  Surface devices.
++
++	  If you are not sure, say M here.
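++
++# The options above form a dependency chain (SURFACE_SAM -> SURFACE_SAM_SSH
++# -> SURFACE_SAM_SID -> SID sub-drivers). For example, a minimal fragment
++# enabling just keyboard/touchpad support on a Surface Laptop 3 would be:
++#
++#   CONFIG_SURFACE_SAM=m
++#   CONFIG_SURFACE_SAM_SSH=m
++#   CONFIG_SURFACE_SAM_SID=m
++#   CONFIG_SURFACE_SAM_SID_VHF=m
++#
++# plus the external requirements noted in the respective entries
++# (SERIAL_DEV_CTRL_TTYPORT, HID).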
+diff --git a/drivers/platform/x86/surface_sam/Makefile b/drivers/platform/x86/surface_sam/Makefile +new file mode 100644 +index 0000000000000..89bced46ebcdd +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/Makefile +@@ -0,0 +1,16 @@ ++# SPDX-License-Identifier: GPL-2.0-or-later ++ ++# For include/trace/define_trace.h to include surface_sam_ssh_trace.h ++CFLAGS_surface_sam_ssh.o = -I$(src) ++ ++obj-$(CONFIG_SURFACE_SAM_SSH) += surface_sam_ssh.o ++obj-$(CONFIG_SURFACE_SAM_SAN) += surface_sam_san.o ++obj-$(CONFIG_SURFACE_SAM_DTX) += surface_sam_dtx.o ++obj-$(CONFIG_SURFACE_SAM_HPS) += surface_sam_hps.o ++obj-$(CONFIG_SURFACE_SAM_VHF) += surface_sam_vhf.o ++obj-$(CONFIG_SURFACE_SAM_SID) += surface_sam_sid.o ++obj-$(CONFIG_SURFACE_SAM_SID_GPELID) += surface_sam_sid_gpelid.o ++obj-$(CONFIG_SURFACE_SAM_SID_PERFMODE) += surface_sam_sid_perfmode.o ++obj-$(CONFIG_SURFACE_SAM_SID_POWER) += surface_sam_sid_power.o ++obj-$(CONFIG_SURFACE_SAM_SID_VHF) += surface_sam_sid_vhf.o ++obj-$(CONFIG_SURFACE_SAM_DEBUGFS) += surface_sam_debugfs.o +diff --git a/drivers/platform/x86/surface_sam/surface_sam_debugfs.c b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c +new file mode 100644 +index 0000000000000..13e93404775c5 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_debugfs.c +@@ -0,0 +1,270 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_ssh.h" ++ ++#define SSAM_DBGDEV_NAME "surface_sam_dbgdev" ++#define SSAM_DBGDEV_VERS 0x0100 ++ ++ ++struct ssam_dbgdev_request { ++ __u8 target_category; ++ __u8 command_id; ++ __u8 instance_id; ++ __u8 channel; ++ __u16 flags; ++ __s16 status; ++ ++ struct { ++ __u8 __pad[6]; ++ __u16 length; ++ const __u8 __user *data; ++ } payload; ++ ++ struct { ++ __u8 __pad[6]; ++ __u16 length; ++ __u8 __user *data; ++ } response; ++}; ++ ++#define SSAM_DBGDEV_IOCTL_GETVERSION _IOR(0xA5, 0, __u32) ++#define SSAM_DBGDEV_IOCTL_REQUEST _IOWR(0xA5, 1, struct ssam_dbgdev_request) ++ ++ ++struct ssam_dbgdev { ++ struct ssam_controller *ctrl; ++ struct dentry *dentry_dir; ++ struct dentry *dentry_dev; ++}; ++ ++ ++static int ssam_dbgdev_open(struct inode *inode, struct file *filp) ++{ ++ filp->private_data = inode->i_private; ++ return nonseekable_open(inode, filp); ++} ++ ++static long ssam_dbgdev_request(struct file *file, unsigned long arg) ++{ ++ struct ssam_dbgdev *ddev = file->private_data; ++ struct ssam_dbgdev_request __user *r; ++ struct ssam_dbgdev_request rqst; ++ struct ssam_request spec; ++ struct ssam_response rsp; ++ u8 *pldbuf = NULL; ++ u8 *rspbuf = NULL; ++ int status = 0, ret = 0, tmp; ++ ++ r = (struct ssam_dbgdev_request __user *)arg; ++ ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r)); ++ if (ret) ++ goto out; ++ ++ // setup basic request fields ++ spec.target_category = rqst.target_category; ++ spec.command_id = rqst.command_id; ++ spec.instance_id = rqst.instance_id; ++ spec.channel = rqst.channel; ++ spec.flags = rqst.flags; ++ spec.length = rqst.payload.length; ++ ++ rsp.capacity = rqst.response.length; ++ rsp.length = 0; ++ ++ // get request payload from user-space ++ if (spec.length) { ++ if (!rqst.payload.data) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ pldbuf = kzalloc(spec.length, GFP_KERNEL); ++ if (!pldbuf) { ++ status = -ENOMEM; ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (copy_from_user(pldbuf, rqst.payload.data, spec.length)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++ spec.payload = pldbuf; ++ ++ // 
allocate response buffer ++ if (rsp.capacity) { ++ if (!rqst.response.data) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ rspbuf = kzalloc(rsp.capacity, GFP_KERNEL); ++ if (!rspbuf) { ++ status = -ENOMEM; ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++ rsp.pointer = rspbuf; ++ ++ // perform request ++ status = ssam_request_sync(ddev->ctrl, &spec, &rsp); ++ if (status) ++ goto out; ++ ++ // copy response to user-space ++ if (rsp.length) { ++ if (copy_to_user(rqst.response.data, rsp.pointer, rsp.length)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++ ++out: ++ // always try to set response-length and status ++ tmp = put_user(rsp.length, &r->response.length); ++ if (!ret) ++ ret = tmp; ++ ++ tmp = put_user(status, &r->status); ++ if (!ret) ++ ret = tmp; ++ ++ // cleanup ++ if (pldbuf) ++ kfree(pldbuf); ++ ++ if (rspbuf) ++ kfree(rspbuf); ++ ++ return ret; ++} ++ ++static long ssam_dbgdev_getversion(struct file *file, unsigned long arg) ++{ ++ put_user(SSAM_DBGDEV_VERS, (u32 __user *)arg); ++ return 0; ++} ++ ++static long ssam_dbgdev_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ switch (cmd) { ++ case SSAM_DBGDEV_IOCTL_GETVERSION: ++ return ssam_dbgdev_getversion(file, arg); ++ ++ case SSAM_DBGDEV_IOCTL_REQUEST: ++ return ssam_dbgdev_request(file, arg); ++ ++ default: ++ return -EINVAL; ++ } ++} ++ ++const struct file_operations ssam_dbgdev_fops = { ++ .owner = THIS_MODULE, ++ .open = ssam_dbgdev_open, ++ .unlocked_ioctl = ssam_dbgdev_ioctl, ++ .compat_ioctl = ssam_dbgdev_ioctl, ++ .llseek = noop_llseek, ++}; ++ ++static int ssam_dbgdev_probe(struct platform_device *pdev) ++{ ++ struct ssam_dbgdev *ddev; ++ struct ssam_controller *ctrl; ++ int status; ++ ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) ++ return status == -ENXIO ? 
-EPROBE_DEFER : status; ++ ++ ddev = devm_kzalloc(&pdev->dev, sizeof(struct ssam_dbgdev), GFP_KERNEL); ++ if (!ddev) ++ return -ENOMEM; ++ ++ ddev->ctrl = ctrl; ++ ++ ddev->dentry_dir = debugfs_create_dir("surface_sam", NULL); ++ if (IS_ERR(ddev->dentry_dir)) ++ return PTR_ERR(ddev->dentry_dir); ++ ++ ddev->dentry_dev = debugfs_create_file("controller", 0600, ++ ddev->dentry_dir, ddev, ++ &ssam_dbgdev_fops); ++ if (IS_ERR(ddev->dentry_dev)) { ++ debugfs_remove(ddev->dentry_dir); ++ return PTR_ERR(ddev->dentry_dev); ++ } ++ ++ platform_set_drvdata(pdev, ddev); ++ return 0; ++} ++ ++static int ssam_dbgdev_remove(struct platform_device *pdev) ++{ ++ struct ssam_dbgdev *ddev = platform_get_drvdata(pdev); ++ ++ debugfs_remove(ddev->dentry_dev); ++ debugfs_remove(ddev->dentry_dir); ++ ++ platform_set_drvdata(pdev, NULL); ++ return 0; ++} ++ ++static void ssam_dbgdev_release(struct device *dev) ++{ ++ // nothing to do ++} ++ ++ ++static struct platform_device ssam_dbgdev_device = { ++ .name = SSAM_DBGDEV_NAME, ++ .id = PLATFORM_DEVID_NONE, ++ .dev.release = ssam_dbgdev_release, ++}; ++ ++static struct platform_driver ssam_dbgdev_driver = { ++ .probe = ssam_dbgdev_probe, ++ .remove = ssam_dbgdev_remove, ++ .driver = { ++ .name = SSAM_DBGDEV_NAME, ++ }, ++}; ++ ++static int __init surface_sam_debugfs_init(void) ++{ ++ int status; ++ ++ status = platform_device_register(&ssam_dbgdev_device); ++ if (status) ++ return status; ++ ++ status = platform_driver_register(&ssam_dbgdev_driver); ++ if (status) ++ platform_device_unregister(&ssam_dbgdev_device); ++ ++ return status; ++} ++ ++static void __exit surface_sam_debugfs_exit(void) ++{ ++ platform_driver_unregister(&ssam_dbgdev_driver); ++ platform_device_unregister(&ssam_dbgdev_device); ++} ++ ++module_init(surface_sam_debugfs_init); ++module_exit(surface_sam_debugfs_exit); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("DebugFS entries for Surface Aggregator Module"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/platform/x86/surface_sam/surface_sam_dtx.c b/drivers/platform/x86/surface_sam/surface_sam_dtx.c +new file mode 100644 +index 0000000000000..9c844bb0f7739 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_dtx.c +@@ -0,0 +1,582 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Detachment system (DTX) driver for Microsoft Surface Book 2. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_ssh.h" ++ ++ ++#define USB_VENDOR_ID_MICROSOFT 0x045e ++#define USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION 0x0922 ++ ++// name copied from MS device manager ++#define DTX_INPUT_NAME "Microsoft Surface Base 2 Integration Device" ++ ++ ++#define DTX_CMD_LATCH_LOCK _IO(0x11, 0x01) ++#define DTX_CMD_LATCH_UNLOCK _IO(0x11, 0x02) ++#define DTX_CMD_LATCH_REQUEST _IO(0x11, 0x03) ++#define DTX_CMD_LATCH_OPEN _IO(0x11, 0x04) ++#define DTX_CMD_GET_OPMODE _IOR(0x11, 0x05, int) ++ ++#define SAM_EVENT_DTX_CID_CONNECTION 0x0c ++#define SAM_EVENT_DTX_CID_BUTTON 0x0e ++#define SAM_EVENT_DTX_CID_ERROR 0x0f ++#define SAM_EVENT_DTX_CID_LATCH_STATUS 0x11 ++ ++#define DTX_OPMODE_TABLET 0x00 ++#define DTX_OPMODE_LAPTOP 0x01 ++#define DTX_OPMODE_STUDIO 0x02 ++ ++#define DTX_LATCH_CLOSED 0x00 ++#define DTX_LATCH_OPENED 0x01 ++ ++ ++// Warning: This must always be a power of 2! 
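++//
++// The per-client event queue below is a ring buffer: its indices wrap
++// via `idx & (DTX_CLIENT_BUF_SIZE - 1)` instead of a modulo operation,
++// which is only equivalent when the buffer size is a power of two.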
++#define DTX_CLIENT_BUF_SIZE 16 ++ ++#define DTX_CONNECT_OPMODE_DELAY 1000 ++ ++#define DTX_ERR KERN_ERR "surface_sam_dtx: " ++#define DTX_WARN KERN_WARNING "surface_sam_dtx: " ++ ++ ++struct surface_dtx_event { ++ u8 type; ++ u8 code; ++ u8 arg0; ++ u8 arg1; ++} __packed; ++ ++struct surface_dtx_dev { ++ struct ssam_controller *ctrl; ++ ++ struct ssam_event_notifier notif; ++ struct delayed_work opmode_work; ++ wait_queue_head_t waitq; ++ struct miscdevice mdev; ++ spinlock_t client_lock; ++ struct list_head client_list; ++ struct mutex mutex; ++ bool active; ++ spinlock_t input_lock; ++ struct input_dev *input_dev; ++}; ++ ++struct surface_dtx_client { ++ struct list_head node; ++ struct surface_dtx_dev *ddev; ++ struct fasync_struct *fasync; ++ spinlock_t buffer_lock; ++ unsigned int buffer_head; ++ unsigned int buffer_tail; ++ struct surface_dtx_event buffer[DTX_CLIENT_BUF_SIZE]; ++}; ++ ++ ++static struct surface_dtx_dev surface_dtx_dev; ++ ++ ++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, { ++ .target_category = SSAM_SSH_TC_BAS, ++ .command_id = 0x06, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, { ++ .target_category = SSAM_SSH_TC_BAS, ++ .command_id = 0x07, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, { ++ .target_category = SSAM_SSH_TC_BAS, ++ .command_id = 0x08, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_open, { ++ .target_category = SSAM_SSH_TC_BAS, ++ .command_id = 0x09, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, { ++ .target_category = SSAM_SSH_TC_BAS, ++ .command_id = 0x0d, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++ ++static int dtx_bas_get_opmode(struct ssam_controller *ctrl, int __user *buf) ++{ ++ u8 opmode; ++ int status; ++ ++ status = ssam_bas_query_opmode(ctrl, &opmode); ++ if (status < 0) ++ return status; ++ ++ if (put_user(opmode, buf)) ++ return -EACCES; ++ ++ return 0; ++} ++ ++ ++static int surface_dtx_open(struct inode *inode, struct file *file) ++{ ++ struct surface_dtx_dev *ddev = container_of(file->private_data, struct surface_dtx_dev, mdev); ++ struct surface_dtx_client *client; ++ ++ // initialize client ++ client = kzalloc(sizeof(struct surface_dtx_client), GFP_KERNEL); ++ if (!client) ++ return -ENOMEM; ++ ++ spin_lock_init(&client->buffer_lock); ++ client->buffer_head = 0; ++ client->buffer_tail = 0; ++ client->ddev = ddev; ++ ++ // attach client ++ spin_lock(&ddev->client_lock); ++ list_add_tail_rcu(&client->node, &ddev->client_list); ++ spin_unlock(&ddev->client_lock); ++ ++ file->private_data = client; ++ nonseekable_open(inode, file); ++ ++ return 0; ++} ++ ++static int surface_dtx_release(struct inode *inode, struct file *file) ++{ ++ struct surface_dtx_client *client = file->private_data; ++ ++ // detach client ++ spin_lock(&client->ddev->client_lock); ++ list_del_rcu(&client->node); ++ spin_unlock(&client->ddev->client_lock); ++ synchronize_rcu(); ++ ++ kfree(client); ++ file->private_data = NULL; ++ ++ return 0; ++} ++ ++static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs) ++{ ++ struct surface_dtx_client *client = file->private_data; ++ struct surface_dtx_dev *ddev = client->ddev; ++ struct surface_dtx_event event; ++ size_t read = 0; ++ int status = 0; ++ ++ if (count != 0 && count < sizeof(struct 
surface_dtx_event)) ++ return -EINVAL; ++ ++ if (!ddev->active) ++ return -ENODEV; ++ ++ // check availability ++ if (client->buffer_head == client->buffer_tail) { ++ if (file->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ ++ status = wait_event_interruptible(ddev->waitq, ++ client->buffer_head != client->buffer_tail || ++ !ddev->active); ++ if (status) ++ return status; ++ ++ if (!ddev->active) ++ return -ENODEV; ++ } ++ ++ // copy events one by one ++ while (read + sizeof(struct surface_dtx_event) <= count) { ++ spin_lock_irq(&client->buffer_lock); ++ ++ if (client->buffer_head == client->buffer_tail) { ++ spin_unlock_irq(&client->buffer_lock); ++ break; ++ } ++ ++ // get one event ++ event = client->buffer[client->buffer_tail]; ++ client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1); ++ spin_unlock_irq(&client->buffer_lock); ++ ++ // copy to userspace ++ if (copy_to_user(buf, &event, sizeof(struct surface_dtx_event))) ++ return -EFAULT; ++ ++ read += sizeof(struct surface_dtx_event); ++ } ++ ++ return read; ++} ++ ++static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt) ++{ ++ struct surface_dtx_client *client = file->private_data; ++ int mask; ++ ++ poll_wait(file, &client->ddev->waitq, pt); ++ ++ if (client->ddev->active) ++ mask = EPOLLOUT | EPOLLWRNORM; ++ else ++ mask = EPOLLHUP | EPOLLERR; ++ ++ if (client->buffer_head != client->buffer_tail) ++ mask |= EPOLLIN | EPOLLRDNORM; ++ ++ return mask; ++} ++ ++static int surface_dtx_fasync(int fd, struct file *file, int on) ++{ ++ struct surface_dtx_client *client = file->private_data; ++ ++ return fasync_helper(fd, file, on, &client->fasync); ++} ++ ++static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct surface_dtx_client *client = file->private_data; ++ struct surface_dtx_dev *ddev = client->ddev; ++ int status; ++ ++ status = mutex_lock_interruptible(&ddev->mutex); ++ if (status) ++ return status; ++ ++ if (!ddev->active) { ++ mutex_unlock(&ddev->mutex); ++ return -ENODEV; ++ } ++ ++ switch (cmd) { ++ case DTX_CMD_LATCH_LOCK: ++ status = ssam_bas_latch_lock(ddev->ctrl); ++ break; ++ ++ case DTX_CMD_LATCH_UNLOCK: ++ status = ssam_bas_latch_unlock(ddev->ctrl); ++ break; ++ ++ case DTX_CMD_LATCH_REQUEST: ++ status = ssam_bas_latch_request(ddev->ctrl); ++ break; ++ ++ case DTX_CMD_LATCH_OPEN: ++ status = ssam_bas_latch_open(ddev->ctrl); ++ break; ++ ++ case DTX_CMD_GET_OPMODE: ++ status = dtx_bas_get_opmode(ddev->ctrl, (int __user *)arg); ++ break; ++ ++ default: ++ status = -EINVAL; ++ break; ++ } ++ ++ mutex_unlock(&ddev->mutex); ++ return status; ++} ++ ++static const struct file_operations surface_dtx_fops = { ++ .owner = THIS_MODULE, ++ .open = surface_dtx_open, ++ .release = surface_dtx_release, ++ .read = surface_dtx_read, ++ .poll = surface_dtx_poll, ++ .fasync = surface_dtx_fasync, ++ .unlocked_ioctl = surface_dtx_ioctl, ++ .llseek = no_llseek, ++}; ++ ++static struct surface_dtx_dev surface_dtx_dev = { ++ .mdev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "surface_dtx", ++ .fops = &surface_dtx_fops, ++ }, ++ .client_lock = __SPIN_LOCK_UNLOCKED(), ++ .input_lock = __SPIN_LOCK_UNLOCKED(), ++ .mutex = __MUTEX_INITIALIZER(surface_dtx_dev.mutex), ++ .active = false, ++}; ++ ++ ++static void surface_dtx_push_event(struct surface_dtx_dev *ddev, struct surface_dtx_event *event) ++{ ++ struct surface_dtx_client *client; ++ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(client, &ddev->client_list, node) { ++ spin_lock(&client->buffer_lock); ++ 
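++		// Enqueue one event for this client. On overrun the oldest
++		// event is dropped (tail advanced); readers are notified via
++		// SIGIO (kill_fasync) and the shared wait-queue woken below.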
++		client->buffer[client->buffer_head++] = *event;
++		client->buffer_head &= DTX_CLIENT_BUF_SIZE - 1;
++
++		if (unlikely(client->buffer_head == client->buffer_tail)) {
++			printk(DTX_WARN "event buffer overrun\n");
++			client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
++		}
++
++		spin_unlock(&client->buffer_lock);
++
++		kill_fasync(&client->fasync, SIGIO, POLL_IN);
++	}
++	rcu_read_unlock();
++
++	wake_up_interruptible(&ddev->waitq);
++}
++
++
++static void surface_dtx_update_opmode(struct surface_dtx_dev *ddev)
++{
++	struct surface_dtx_event event;
++	u8 opmode;
++	int status;
++
++	// get operation mode
++	status = ssam_bas_query_opmode(ddev->ctrl, &opmode);
++	if (status < 0) {
++		printk(DTX_ERR "EC request failed with error %d\n", status);
++		return;
++	}
++
++	// send DTX event
++	event.type = 0x11;
++	event.code = 0x0D;
++	event.arg0 = opmode;
++	event.arg1 = 0x00;
++
++	surface_dtx_push_event(ddev, &event);
++
++	// send SW_TABLET_MODE event
++	spin_lock(&ddev->input_lock);
++	input_report_switch(ddev->input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
++	input_sync(ddev->input_dev);
++	spin_unlock(&ddev->input_lock);
++}
++
++static void surface_dtx_opmode_workfn(struct work_struct *work)
++{
++	struct surface_dtx_dev *ddev = container_of(work, struct surface_dtx_dev, opmode_work.work);
++
++	surface_dtx_update_opmode(ddev);
++}
++
++static u32 surface_dtx_notification(struct ssam_notifier_block *nb, const struct ssam_event *in_event)
++{
++	struct surface_dtx_dev *ddev = container_of(nb, struct surface_dtx_dev, notif.base);
++	struct surface_dtx_event event;
++	unsigned long delay;
++
++	switch (in_event->command_id) {
++	case SAM_EVENT_DTX_CID_CONNECTION:
++	case SAM_EVENT_DTX_CID_BUTTON:
++	case SAM_EVENT_DTX_CID_ERROR:
++	case SAM_EVENT_DTX_CID_LATCH_STATUS:
++		if (in_event->length > 2) {
++			printk(DTX_ERR "unexpected payload size (cid: %x, len: %u)\n",
++			       in_event->command_id, in_event->length);
++			return SSAM_NOTIF_HANDLED;
++		}
++
++		event.type = in_event->target_category;
++		event.code = in_event->command_id;
++		event.arg0 = in_event->length >= 1 ? in_event->data[0] : 0x00;
++		event.arg1 = in_event->length >= 2 ? in_event->data[1] : 0x00;
++		surface_dtx_push_event(ddev, &event);
++		break;
++
++	default:
++		return 0;
++	}
++
++	// update device mode
++	if (in_event->command_id == SAM_EVENT_DTX_CID_CONNECTION) {
++		delay = event.arg0 ? DTX_CONNECT_OPMODE_DELAY : 0;
++		schedule_delayed_work(&ddev->opmode_work, delay);
++	}
++
++	return SSAM_NOTIF_HANDLED;
++}
++
++
++static struct input_dev *surface_dtx_register_inputdev(
++		struct platform_device *pdev, struct ssam_controller *ctrl)
++{
++	struct input_dev *input_dev;
++	u8 opmode;
++	int status;
++
++	input_dev = input_allocate_device();
++	if (!input_dev)
++		return ERR_PTR(-ENOMEM);
++
++	input_dev->name = DTX_INPUT_NAME;
++	input_dev->dev.parent = &pdev->dev;
++	input_dev->id.bustype = BUS_VIRTUAL;
++	input_dev->id.vendor = USB_VENDOR_ID_MICROSOFT;
++	input_dev->id.product = USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION;
++
++	input_set_capability(input_dev, EV_SW, SW_TABLET_MODE);
++
++	status = ssam_bas_query_opmode(ctrl, &opmode);
++	if (status < 0) {
++		input_free_device(input_dev);
++		return ERR_PTR(status);
++	}
++
++	input_report_switch(input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
++
++	status = input_register_device(input_dev);
++	if (status) {
++		// registration failed, so the device must be freed, not unregistered
++		input_free_device(input_dev);
++		return ERR_PTR(status);
++	}
++
++	return input_dev;
++}
++
++
++static int surface_sam_dtx_probe(struct platform_device *pdev)
++{
++	struct surface_dtx_dev *ddev = &surface_dtx_dev;
++	struct ssam_controller *ctrl;
++	struct input_dev *input_dev;
++	int status;
++
++	// link to ec
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	input_dev = surface_dtx_register_inputdev(pdev, ctrl);
++	if (IS_ERR(input_dev))
++		return PTR_ERR(input_dev);
++
++	// initialize device
++	mutex_lock(&ddev->mutex);
++	if (ddev->active) {
++		mutex_unlock(&ddev->mutex);
++		status = -ENODEV;
++		goto err_register;
++	}
++
++	ddev->ctrl = ctrl;
++	INIT_DELAYED_WORK(&ddev->opmode_work, surface_dtx_opmode_workfn);
++	INIT_LIST_HEAD(&ddev->client_list);
++	init_waitqueue_head(&ddev->waitq);
++	ddev->active = true;
++	ddev->input_dev = input_dev;
++	mutex_unlock(&ddev->mutex);
++
++	status = misc_register(&ddev->mdev);
++	if (status)
++		goto err_register;
++
++	// set up events
++	ddev->notif.base.priority = 1;
++	ddev->notif.base.fn = surface_dtx_notification;
++	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
++	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
++	ddev->notif.event.id.instance = 0;
++	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
++
++	status = ssam_notifier_register(ctrl, &ddev->notif);
++	if (status)
++		goto err_events_setup;
++
++	return 0;
++
++err_events_setup:
++	misc_deregister(&ddev->mdev);
++err_register:
++	input_unregister_device(input_dev);
++	return status;
++}
++
++static int surface_sam_dtx_remove(struct platform_device *pdev)
++{
++	struct surface_dtx_dev *ddev = &surface_dtx_dev;
++	struct surface_dtx_client *client;
++
++	mutex_lock(&ddev->mutex);
++	if (!ddev->active) {
++		mutex_unlock(&ddev->mutex);
++		return 0;
++	}
++
++	// mark as inactive
++	ddev->active = false;
++	mutex_unlock(&ddev->mutex);
++
++	// After this call we're guaranteed that no more input events will arrive
++	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
++
++	// wake up clients
++	spin_lock(&ddev->client_lock);
++	list_for_each_entry(client, &ddev->client_list, node) {
++		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
++	}
++	spin_unlock(&ddev->client_lock);
++
++	wake_up_interruptible(&ddev->waitq);
++
++	// unregister user-space devices
++	input_unregister_device(ddev->input_dev);
++	misc_deregister(&ddev->mdev);
++
++	return 0;
++}
++
++
++static const struct acpi_device_id
surface_sam_dtx_match[] = { ++ { "MSHW0133", 0 }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, surface_sam_dtx_match); ++ ++static struct platform_driver surface_sam_dtx = { ++ .probe = surface_sam_dtx_probe, ++ .remove = surface_sam_dtx_remove, ++ .driver = { ++ .name = "surface_sam_dtx", ++ .acpi_match_table = surface_sam_dtx_match, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++module_platform_driver(surface_sam_dtx); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("Surface Detachment System (DTX) Driver for 5th Generation Surface Devices"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/platform/x86/surface_sam/surface_sam_hps.c b/drivers/platform/x86/surface_sam/surface_sam_hps.c +new file mode 100644 +index 0000000000000..b11f9fa8095fb +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_hps.c +@@ -0,0 +1,1287 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Surface dGPU hot-plug system driver. ++ * Supports explicit setting of the dGPU power-state on the Surface Book 2 and ++ * properly handles hot-plugging by detaching the base. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_ssh.h" ++#include "surface_sam_san.h" ++ ++ ++// TODO: vgaswitcheroo integration ++ ++ ++static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix); ++ ++ ++#define SHPS_DSM_REVISION 1 ++#define SHPS_DSM_GPU_ADDRS 0x02 ++#define SHPS_DSM_GPU_POWER 0x05 ++static const guid_t SHPS_DSM_UUID = ++ GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, ++ 0x32, 0x0e, 0x10, 0x36, 0x0a); ++ ++ ++#define SAM_DGPU_TC 0x13 ++#define SAM_DGPU_CID_POWERON 0x02 ++#define ACPI_SGCP_NOTIFY_POWER_ON 0x81 ++ ++#define SHPS_DSM_GPU_ADDRS_RP "RP5_PCIE" ++#define SHPS_DSM_GPU_ADDRS_DGPU "DGPU_PCIE" ++#define SHPS_PCI_GPU_ADDR_RP "\\_SB.PCI0.RP13._ADR" ++ ++static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false }; ++static const struct acpi_gpio_params gpio_base_presence = { 1, 0, false }; ++static const struct acpi_gpio_params gpio_dgpu_power_int = { 2, 0, false }; ++static const struct acpi_gpio_params gpio_dgpu_power = { 3, 0, false }; ++static const struct acpi_gpio_params gpio_dgpu_presence_int = { 4, 0, false }; ++static const struct acpi_gpio_params gpio_dgpu_presence = { 5, 0, false }; ++ ++static const struct acpi_gpio_mapping shps_acpi_gpios[] = { ++ { "base_presence-int-gpio", &gpio_base_presence_int, 1 }, ++ { "base_presence-gpio", &gpio_base_presence, 1 }, ++ { "dgpu_power-int-gpio", &gpio_dgpu_power_int, 1 }, ++ { "dgpu_power-gpio", &gpio_dgpu_power, 1 }, ++ { "dgpu_presence-int-gpio", &gpio_dgpu_presence_int, 1 }, ++ { "dgpu_presence-gpio", &gpio_dgpu_presence, 1 }, ++ { }, ++}; ++ ++ ++enum shps_dgpu_power { ++ SHPS_DGPU_POWER_OFF = 0, ++ SHPS_DGPU_POWER_ON = 1, ++ SHPS_DGPU_POWER_UNKNOWN = 2, ++}; ++ ++static const char *shps_dgpu_power_str(enum shps_dgpu_power power) ++{ ++ if (power == SHPS_DGPU_POWER_OFF) ++ return "off"; ++ else if (power == SHPS_DGPU_POWER_ON) ++ return "on"; ++ else if (power == SHPS_DGPU_POWER_UNKNOWN) ++ return "unknown"; ++ else ++ return ""; ++} ++ ++enum shps_notification_method { ++ SHPS_NOTIFICATION_METHOD_SAN = 1, ++ SHPS_NOTIFICATION_METHOD_SGCP = 2 ++}; ++ ++struct shps_hardware_traits { ++ enum shps_notification_method notification_method; ++ const char *dgpu_rp_pci_address; ++}; ++ ++struct shps_driver_data { ++ struct ssam_controller *ctrl; ++ ++ struct mutex lock; ++ struct pci_dev *dgpu_root_port; ++ struct 
pci_saved_state *dgpu_root_port_state;
++	struct gpio_desc *gpio_dgpu_power;
++	struct gpio_desc *gpio_dgpu_presence;
++	struct gpio_desc *gpio_base_presence;
++	unsigned int irq_dgpu_presence;
++	unsigned int irq_base_presence;
++	unsigned long state;
++	acpi_handle sgpc_handle;
++	struct shps_hardware_traits hardware_traits;
++};
++
++struct shps_hardware_probe {
++	const char *hardware_id;
++	int generation;
++	struct shps_hardware_traits *hardware_traits;
++};
++
++static struct shps_hardware_traits shps_gen1_hwtraits = {
++	.notification_method = SHPS_NOTIFICATION_METHOD_SAN
++};
++
++static struct shps_hardware_traits shps_gen2_hwtraits = {
++	.notification_method = SHPS_NOTIFICATION_METHOD_SGCP,
++	.dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP
++};
++
++static const struct shps_hardware_probe shps_hardware_probe_match[] = {
++	/* Surface Book 3 */
++	{ "MSHW0117", 2, &shps_gen2_hwtraits },
++
++	/* Surface Book 2 (default, must be last entry) */
++	{ NULL, 1, &shps_gen1_hwtraits }
++};
++
++#define SHPS_STATE_BIT_PWRTGT		0	/* desired power state: 1 for on, 0 for off */
++#define SHPS_STATE_BIT_RPPWRON_SYNC	1	/* synchronous/requested power-up in progress */
++#define SHPS_STATE_BIT_WAKE_ENABLED	2	/* wakeup via base-presence GPIO enabled */
++
++
++#define SHPS_DGPU_PARAM_PERM	0644
++
++enum shps_dgpu_power_mp {
++	SHPS_DGPU_MP_POWER_OFF  = SHPS_DGPU_POWER_OFF,
++	SHPS_DGPU_MP_POWER_ON   = SHPS_DGPU_POWER_ON,
++	SHPS_DGPU_MP_POWER_ASIS = -1,
++
++	__SHPS_DGPU_MP_POWER_START = -1,
++	__SHPS_DGPU_MP_POWER_END   = 1,
++};
++
++static int param_dgpu_power_set(const char *val, const struct kernel_param *kp)
++{
++	int power = SHPS_DGPU_MP_POWER_OFF;
++	int status;
++
++	status = kstrtoint(val, 0, &power);
++	if (status)
++		return status;
++
++	if (power < __SHPS_DGPU_MP_POWER_START || power > __SHPS_DGPU_MP_POWER_END)
++		return -EINVAL;
++
++	return param_set_int(val, kp);
++}
++
++static const struct kernel_param_ops param_dgpu_power_ops = {
++	.set = param_dgpu_power_set,
++	.get = param_get_int,
++};
++
++static int param_dgpu_power_init = SHPS_DGPU_MP_POWER_OFF;
++static int param_dgpu_power_exit = SHPS_DGPU_MP_POWER_ON;
++static int param_dgpu_power_susp = SHPS_DGPU_MP_POWER_ASIS;
++static bool param_dtx_latch = true;
++
++module_param_cb(dgpu_power_init, &param_dgpu_power_ops, &param_dgpu_power_init, SHPS_DGPU_PARAM_PERM);
++module_param_cb(dgpu_power_exit, &param_dgpu_power_ops, &param_dgpu_power_exit, SHPS_DGPU_PARAM_PERM);
++module_param_cb(dgpu_power_susp, &param_dgpu_power_ops, &param_dgpu_power_susp, SHPS_DGPU_PARAM_PERM);
++module_param_named(dtx_latch, param_dtx_latch, bool, SHPS_DGPU_PARAM_PERM);
++
++MODULE_PARM_DESC(dgpu_power_init, "dGPU power state to be set on init (0: off / 1: on / -1: as-is, default: off)");
++MODULE_PARM_DESC(dgpu_power_exit, "dGPU power state to be set on exit (0: off / 1: on / -1: as-is, default: on)");
++MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on suspend (0: off / 1: on / -1: as-is, default: as-is)");
++MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)");
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.command_id = 0x06,
++	.instance_id = 0x00,
++	.channel = 0x01,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
++	.target_category = SSAM_SSH_TC_BAS,
++	.command_id = 0x07,
++	.instance_id = 0x00,
++	.channel = 0x01,
++});
++
++static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry)
++{
++	acpi_handle handle =
ACPI_HANDLE(&pdev->dev); ++ int status; ++ struct acpi_object_list input; ++ union acpi_object input_args[0]; ++ u64 device_addr; ++ u8 bus, dev, fun; ++ ++ input.count = 0; ++ input.pointer = input_args; ++ ++ ++ status = acpi_evaluate_integer(handle, (acpi_string)entry, &input, &device_addr); ++ if (status) { ++ return -ENODEV; ++ } ++ ++ bus = 0; ++ dev = (device_addr & 0xFF0000) >> 16; ++ fun = device_addr & 0xFF; ++ ++ dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n", ++ (u32)bus, (u32)dev, (u32)fun); ++ ++ return bus << 8 | PCI_DEVFN(dev, fun); ++} ++ ++static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry) ++{ ++ acpi_handle handle = ACPI_HANDLE(&pdev->dev); ++ union acpi_object *result; ++ union acpi_object *e0; ++ union acpi_object *e1; ++ union acpi_object *e2; ++ u64 device_addr = 0; ++ u8 bus, dev, fun; ++ int i; ++ ++ ++ result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION, ++ SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE); ++ ++ if (IS_ERR_OR_NULL(result)) ++ return result ? PTR_ERR(result) : -EIO; ++ ++ // three entries per device: name, address, ++ for (i = 0; i + 2 < result->package.count; i += 3) { ++ e0 = &result->package.elements[i]; ++ e1 = &result->package.elements[i + 1]; ++ e2 = &result->package.elements[i + 2]; ++ ++ if (e0->type != ACPI_TYPE_STRING) { ++ ACPI_FREE(result); ++ return -EIO; ++ } ++ ++ if (e1->type != ACPI_TYPE_INTEGER) { ++ ACPI_FREE(result); ++ return -EIO; ++ } ++ ++ if (e2->type != ACPI_TYPE_INTEGER) { ++ ACPI_FREE(result); ++ return -EIO; ++ } ++ ++ if (strncmp(e0->string.pointer, entry, 64) == 0) ++ device_addr = e1->integer.value; ++ } ++ ++ ACPI_FREE(result); ++ if (device_addr == 0) ++ return -ENODEV; ++ ++ ++ // convert address ++ bus = (device_addr & 0x0FF00000) >> 20; ++ dev = (device_addr & 0x000F8000) >> 15; ++ fun = (device_addr & 0x00007000) >> 12; ++ ++ return bus << 8 | PCI_DEVFN(dev, fun); ++} ++ ++static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ struct pci_dev *dev; ++ int addr; ++ ++ ++ if (drvdata->hardware_traits.dgpu_rp_pci_address) { ++ addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address); ++ } else { ++ addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP); ++ } ++ ++ if (addr < 0) ++ return ERR_PTR(addr); ++ ++ dev = pci_get_domain_bus_and_slot(0, (addr & 0xFF00) >> 8, addr & 0xFF); ++ return dev ? dev : ERR_PTR(-ENODEV); ++} ++ ++ ++static int shps_dgpu_dsm_get_power_unlocked(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ struct gpio_desc *gpio = drvdata->gpio_dgpu_power; ++ int status; ++ ++ status = gpiod_get_value_cansleep(gpio); ++ if (status < 0) ++ return status; ++ ++ return status == 0 ? 
SHPS_DGPU_POWER_OFF : SHPS_DGPU_POWER_ON; ++} ++ ++static int shps_dgpu_dsm_get_power(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ mutex_lock(&drvdata->lock); ++ status = shps_dgpu_dsm_get_power_unlocked(pdev); ++ mutex_unlock(&drvdata->lock); ++ ++ return status; ++} ++ ++static int __shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ acpi_handle handle = ACPI_HANDLE(&pdev->dev); ++ union acpi_object *result; ++ union acpi_object param; ++ ++ dev_info(&pdev->dev, "setting dGPU direct power to \'%s\'\n", shps_dgpu_power_str(power)); ++ ++ param.type = ACPI_TYPE_INTEGER; ++ param.integer.value = power == SHPS_DGPU_POWER_ON; ++ ++ result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION, ++ SHPS_DSM_GPU_POWER, ¶m, ACPI_TYPE_BUFFER); ++ ++ if (IS_ERR_OR_NULL(result)) ++ return result ? PTR_ERR(result) : -EIO; ++ ++ // check for the expected result ++ if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) { ++ ACPI_FREE(result); ++ return -EIO; ++ } ++ ++ ACPI_FREE(result); ++ return 0; ++} ++ ++static int shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ int status; ++ ++ if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF) ++ return -EINVAL; ++ ++ status = shps_dgpu_dsm_get_power_unlocked(pdev); ++ if (status < 0) ++ return status; ++ if (status == power) ++ return 0; ++ ++ return __shps_dgpu_dsm_set_power_unlocked(pdev, power); ++} ++ ++static int shps_dgpu_dsm_set_power(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ mutex_lock(&drvdata->lock); ++ status = shps_dgpu_dsm_set_power_unlocked(pdev, power); ++ mutex_unlock(&drvdata->lock); ++ ++ return status; ++} ++ ++ ++static bool shps_rp_link_up(struct pci_dev *rp) ++{ ++ u16 lnksta = 0, sltsta = 0; ++ ++ pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta); ++ pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta); ++ ++ return (lnksta & PCI_EXP_LNKSTA_DLLLA) || (sltsta & PCI_EXP_SLTSTA_PDS); ++} ++ ++ ++static int shps_dgpu_rp_get_power_unlocked(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ struct pci_dev *rp = drvdata->dgpu_root_port; ++ ++ if (rp->current_state == PCI_D3hot || rp->current_state == PCI_D3cold) ++ return SHPS_DGPU_POWER_OFF; ++ else if (rp->current_state == PCI_UNKNOWN || rp->current_state == PCI_POWER_ERROR) ++ return SHPS_DGPU_POWER_UNKNOWN; ++ else ++ return SHPS_DGPU_POWER_ON; ++} ++ ++static int shps_dgpu_rp_get_power(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ mutex_lock(&drvdata->lock); ++ status = shps_dgpu_rp_get_power_unlocked(pdev); ++ mutex_unlock(&drvdata->lock); ++ ++ return status; ++} ++ ++static int __shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ struct pci_dev *rp = drvdata->dgpu_root_port; ++ int status, i; ++ ++ dev_info(&pdev->dev, "setting dGPU power state to \'%s\'\n", shps_dgpu_power_str(power)); ++ ++ dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.1"); ++ if (power == SHPS_DGPU_POWER_ON) { ++ set_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state); ++ pci_set_power_state(rp, PCI_D0); ++ ++ if (drvdata->dgpu_root_port_state) ++ 
pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state); ++ ++ pci_restore_state(rp); ++ ++ if (!pci_is_enabled(rp)) ++ pci_enable_device(rp); ++ ++ pci_set_master(rp); ++ clear_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state); ++ ++ set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ } else { ++ if (!drvdata->dgpu_root_port_state) { ++ pci_save_state(rp); ++ drvdata->dgpu_root_port_state = pci_store_saved_state(rp); ++ } ++ ++ /* ++ * To properly update the hot-plug system we need to "remove" the dGPU ++ * before disabling it and sending it to D3cold. Following this, we ++ * need to wait for the link and slot status to actually change. ++ */ ++ status = shps_dgpu_dsm_set_power_unlocked(pdev, SHPS_DGPU_POWER_OFF); ++ if (status) ++ return status; ++ ++ for (i = 0; i < 20 && shps_rp_link_up(rp); i++) ++ msleep(50); ++ ++ if (shps_rp_link_up(rp)) ++ dev_err(&pdev->dev, "dGPU removal via DSM timed out\n"); ++ ++ pci_clear_master(rp); ++ ++ if (pci_is_enabled(rp)) ++ pci_disable_device(rp); ++ ++ pci_set_power_state(rp, PCI_D3cold); ++ ++ clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ } ++ dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.2"); ++ ++ return 0; ++} ++ ++static int shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ int status; ++ ++ if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF) ++ return -EINVAL; ++ ++ status = shps_dgpu_rp_get_power_unlocked(pdev); ++ if (status < 0) ++ return status; ++ if (status == power) ++ return 0; ++ ++ return __shps_dgpu_rp_set_power_unlocked(pdev, power); ++} ++ ++static int shps_dgpu_rp_set_power(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ mutex_lock(&drvdata->lock); ++ status = shps_dgpu_rp_set_power_unlocked(pdev, power); ++ mutex_unlock(&drvdata->lock); ++ ++ return status; ++} ++ ++ ++static int shps_dgpu_set_power(struct platform_device *pdev, enum shps_dgpu_power power) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ if (!param_dtx_latch) ++ return shps_dgpu_rp_set_power(pdev, power); ++ ++ if (power == SHPS_DGPU_POWER_ON) { ++ status = ssam_bas_latch_lock(drvdata->ctrl); ++ if (status) ++ return status; ++ ++ status = shps_dgpu_rp_set_power(pdev, power); ++ if (status) ++ ssam_bas_latch_unlock(drvdata->ctrl); ++ ++ } else { ++ status = shps_dgpu_rp_set_power(pdev, power); ++ if (status) ++ return status; ++ ++ status = ssam_bas_latch_unlock(drvdata->ctrl); ++ } ++ ++ return status; ++} ++ ++ ++static int shps_dgpu_is_present(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata; ++ ++ drvdata = platform_get_drvdata(pdev); ++ return gpiod_get_value_cansleep(drvdata->gpio_dgpu_presence); ++} ++ ++ ++static ssize_t dgpu_power_show(struct device *dev, struct device_attribute *attr, char *data) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ int power = shps_dgpu_rp_get_power(pdev); ++ ++ if (power < 0) ++ return power; ++ ++ return sprintf(data, "%s\n", shps_dgpu_power_str(power)); ++} ++ ++static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr, ++ const char *data, size_t count) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ enum shps_dgpu_power power; ++ bool b = false; ++ int status; ++ ++ status = kstrtobool(data, &b); ++ if (status) ++ return status; ++ ++ status = shps_dgpu_is_present(pdev); ++ if (status <= 0) ++ return status < 0 
? status : -EPERM;
++
++	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
++	status = shps_dgpu_set_power(pdev, power);
++
++	return status < 0 ? status : count;
++}
++
++static ssize_t dgpu_power_dsm_show(struct device *dev, struct device_attribute *attr, char *data)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	int power = shps_dgpu_dsm_get_power(pdev);
++
++	if (power < 0)
++		return power;
++
++	return sprintf(data, "%s\n", shps_dgpu_power_str(power));
++}
++
++static ssize_t dgpu_power_dsm_store(struct device *dev, struct device_attribute *attr,
++				    const char *data, size_t count)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	enum shps_dgpu_power power;
++	bool b = false;
++	int status;
++
++	status = kstrtobool(data, &b);
++	if (status)
++		return status;
++
++	status = shps_dgpu_is_present(pdev);
++	if (status <= 0)
++		return status < 0 ? status : -EPERM;
++
++	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
++	status = shps_dgpu_dsm_set_power(pdev, power);
++
++	return status < 0 ? status : count;
++}
++
++static DEVICE_ATTR_RW(dgpu_power);
++static DEVICE_ATTR_RW(dgpu_power_dsm);
++
++static struct attribute *shps_power_attrs[] = {
++	&dev_attr_dgpu_power.attr,
++	&dev_attr_dgpu_power_dsm.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(shps_power);
++
++
++static void dbg_dump_power_states(struct platform_device *pdev, const char *prefix)
++{
++	enum shps_dgpu_power power_dsm;
++	enum shps_dgpu_power power_rp;
++	int status;
++
++	status = shps_dgpu_rp_get_power_unlocked(pdev);
++	if (status < 0)
++		dev_err(&pdev->dev, "%s: failed to get root-port power state: %d\n", prefix, status);
++	power_rp = status;
++
++	status = shps_dgpu_dsm_get_power_unlocked(pdev);
++	if (status < 0)
++		dev_err(&pdev->dev, "%s: failed to get direct power state: %d\n", prefix, status);
++	power_dsm = status;
++
++	dev_dbg(&pdev->dev, "%s: root-port power state: %d\n", prefix, power_rp);
++	dev_dbg(&pdev->dev, "%s: direct power state: %d\n", prefix, power_dsm);
++}
++
++static void dbg_dump_pciesta(struct platform_device *pdev, const char *prefix)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++	u16 lnksta, lnksta2, sltsta, sltsta2;
++
++	pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
++	pcie_capability_read_word(rp, PCI_EXP_LNKSTA2, &lnksta2);
++	pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
++	pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2);
++
++	dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta);
++	dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2);
++	dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta);
++	dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2);
++}
++
++static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix)
++{
++	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++	struct pci_dev *rp = drvdata->dgpu_root_port;
++
++	dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state);
++	dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved);
++	dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state);
++	dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt));
++	dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster);
++}
++
++static int shps_pm_prepare(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct shps_driver_data *drvdata =
platform_get_drvdata(pdev); ++ bool pwrtgt; ++ int status = 0; ++ ++ dbg_dump_power_states(pdev, "shps_pm_prepare"); ++ ++ if (param_dgpu_power_susp != SHPS_DGPU_MP_POWER_ASIS) { ++ pwrtgt = test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ ++ status = shps_dgpu_set_power(pdev, param_dgpu_power_susp); ++ if (status) { ++ dev_err(&pdev->dev, "failed to power %s dGPU: %d\n", ++ param_dgpu_power_susp == SHPS_DGPU_MP_POWER_OFF ? "off" : "on", ++ status); ++ return status; ++ } ++ ++ if (pwrtgt) ++ set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ else ++ clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ } ++ ++ return 0; ++} ++ ++static void shps_pm_complete(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ dbg_dump_power_states(pdev, "shps_pm_complete"); ++ dbg_dump_pciesta(pdev, "shps_pm_complete"); ++ dbg_dump_drvsta(pdev, "shps_pm_complete.1"); ++ ++ // update power target, dGPU may have been detached while suspended ++ status = shps_dgpu_is_present(pdev); ++ if (status < 0) { ++ dev_err(&pdev->dev, "failed to get dGPU presence: %d\n", status); ++ return; ++ } else if (status == 0) { ++ clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ } ++ ++ /* ++ * During resume, the PCIe core will power on the root-port, which in turn ++ * will power on the dGPU. Most of the state synchronization is already ++ * handled via the SAN RQSG handler, so it is in a fully consistent ++ * on-state here. If requested, turn it off here. ++ * ++ * As there seem to be some synchronization issues turning off the dGPU ++ * directly after the power-on SAN RQSG notification during the resume ++ * process, let's do this here. ++ * ++ * TODO/FIXME: ++ * This does not combat unhandled power-ons when the device is not fully ++ * resumed, i.e. re-suspended before shps_pm_complete is called. Those ++ * should normally not be an issue, but the dGPU does get hot even though ++ * it is suspended, so ideally we want to keep it off. ++ */ ++ if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) { ++ status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF); ++ if (status) ++ dev_err(&pdev->dev, "failed to power-off dGPU: %d\n", status); ++ } ++ ++ dbg_dump_drvsta(pdev, "shps_pm_complete.2"); ++} ++ ++static int shps_pm_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ if (device_may_wakeup(dev)) { ++ status = enable_irq_wake(drvdata->irq_base_presence); ++ if (status) ++ return status; ++ ++ set_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state); ++ } ++ ++ return 0; ++} ++ ++static int shps_pm_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status = 0; ++ ++ if (test_and_clear_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state)) ++ status = disable_irq_wake(drvdata->irq_base_presence); ++ ++ return status; ++} ++ ++static void shps_shutdown(struct platform_device *pdev) ++{ ++ int status; ++ ++ /* ++ * Turn on dGPU before shutting down. This allows the core drivers to ++ * properly shut down the device. If we don't do this, the pcieport driver ++ * will complain that the device has already been disabled. 
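++	 * (Compare the dgpu_power_exit module parameter above, which likewise
++	 * defaults to powering the dGPU on when the driver exits.)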
++ */
++ status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_ON);
++ if (status)
++ dev_err(&pdev->dev, "failed to turn on dGPU: %d\n", status);
++}
++
++static int shps_dgpu_detached(struct platform_device *pdev)
++{
++ dbg_dump_power_states(pdev, "shps_dgpu_detached");
++ return shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
++}
++
++static int shps_dgpu_attached(struct platform_device *pdev)
++{
++ dbg_dump_power_states(pdev, "shps_dgpu_attached");
++ return 0;
++}
++
++static int shps_dgpu_powered_on(struct platform_device *pdev)
++{
++ /*
++ * This function gets called directly after a power-state transition of
++ * the dGPU root port out of D3cold state, indicating a power-on of the
++ * dGPU. Specifically, this function is called from the RQSG handler of
++ * SAN, invoked by the ACPI _ON method of the dGPU root port. This means
++ * that this function is run inside `pci_set_power_state(rp, ...)`
++ * synchronously and thus returns before the `pci_set_power_state` call
++ * does.
++ *
++ * `pci_set_power_state` may either be called by us or when the PCI
++ * subsystem decides to power up the root port (e.g. during resume). Thus
++ * we should use this function to ensure that the dGPU and root port
++ * states are consistent when an unexpected power-up is encountered.
++ */
++
++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
++ struct pci_dev *rp = drvdata->dgpu_root_port;
++ int status;
++
++ dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.1");
++
++ // if we caused the root port to power-on, return
++ if (test_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state))
++ return 0;
++
++ // if dGPU is not present, force power-target to off and return
++ status = shps_dgpu_is_present(pdev);
++ if (status == 0)
++ clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
++ if (status <= 0)
++ return status;
++
++ mutex_lock(&drvdata->lock);
++
++ dbg_dump_power_states(pdev, "shps_dgpu_powered_on.1");
++ dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.1");
++ if (drvdata->dgpu_root_port_state)
++ pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
++ pci_restore_state(rp);
++ if (!pci_is_enabled(rp))
++ pci_enable_device(rp);
++ pci_set_master(rp);
++ dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.2");
++ dbg_dump_power_states(pdev, "shps_dgpu_powered_on.2");
++ dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.2");
++
++ mutex_unlock(&drvdata->lock);
++
++ if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
++ dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n");
++ // TODO: schedule state re-check and update
++ }
++
++ return 0;
++}
++
++static int shps_dgpu_handle_rqsg(struct surface_sam_san_rqsg *rqsg, void *data)
++{
++ struct platform_device *pdev = data;
++
++ if (rqsg->tc == SAM_DGPU_TC && rqsg->cid == SAM_DGPU_CID_POWERON)
++ return shps_dgpu_powered_on(pdev);
++
++ dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
++ rqsg->tc, rqsg->cid, rqsg->iid);
++ return 0;
++}
++
++static irqreturn_t shps_dgpu_presence_irq(int irq, void *data)
++{
++ struct platform_device *pdev = data;
++ bool dgpu_present;
++ int status;
++
++ status = shps_dgpu_is_present(pdev);
++ if (status < 0) {
++ dev_err(&pdev->dev, "failed to check physical dGPU presence: %d\n", status);
++ return IRQ_HANDLED;
++ }
++
++ dgpu_present = status != 0;
++ dev_info(&pdev->dev, "dGPU physically %s\n", dgpu_present ? 
"attached" : "detached"); ++ ++ if (dgpu_present) ++ status = shps_dgpu_attached(pdev); ++ else ++ status = shps_dgpu_detached(pdev); ++ ++ if (status) ++ dev_err(&pdev->dev, "error handling dGPU interrupt: %d\n", status); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t shps_base_presence_irq(int irq, void *data) ++{ ++ return IRQ_HANDLED; // nothing to do, just wake ++} ++ ++ ++static int shps_gpios_setup(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ struct gpio_desc *gpio_dgpu_power; ++ struct gpio_desc *gpio_dgpu_presence; ++ struct gpio_desc *gpio_base_presence; ++ int status; ++ ++ // get GPIOs ++ gpio_dgpu_power = devm_gpiod_get(&pdev->dev, "dgpu_power", GPIOD_IN); ++ if (IS_ERR(gpio_dgpu_power)) { ++ status = PTR_ERR(gpio_dgpu_power); ++ goto err_out; ++ } ++ ++ gpio_dgpu_presence = devm_gpiod_get(&pdev->dev, "dgpu_presence", GPIOD_IN); ++ if (IS_ERR(gpio_dgpu_presence)) { ++ status = PTR_ERR(gpio_dgpu_presence); ++ goto err_out; ++ } ++ ++ gpio_base_presence = devm_gpiod_get(&pdev->dev, "base_presence", GPIOD_IN); ++ if (IS_ERR(gpio_base_presence)) { ++ status = PTR_ERR(gpio_base_presence); ++ goto err_out; ++ } ++ ++ // export GPIOs ++ status = gpiod_export(gpio_dgpu_power, false); ++ if (status) ++ goto err_out; ++ ++ status = gpiod_export(gpio_dgpu_presence, false); ++ if (status) ++ goto err_export_dgpu_presence; ++ ++ status = gpiod_export(gpio_base_presence, false); ++ if (status) ++ goto err_export_base_presence; ++ ++ // create sysfs links ++ status = gpiod_export_link(&pdev->dev, "gpio-dgpu_power", gpio_dgpu_power); ++ if (status) ++ goto err_link_dgpu_power; ++ ++ status = gpiod_export_link(&pdev->dev, "gpio-dgpu_presence", gpio_dgpu_presence); ++ if (status) ++ goto err_link_dgpu_presence; ++ ++ status = gpiod_export_link(&pdev->dev, "gpio-base_presence", gpio_base_presence); ++ if (status) ++ goto err_link_base_presence; ++ ++ drvdata->gpio_dgpu_power = gpio_dgpu_power; ++ drvdata->gpio_dgpu_presence = gpio_dgpu_presence; ++ drvdata->gpio_base_presence = gpio_base_presence; ++ return 0; ++ ++err_link_base_presence: ++ sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence"); ++err_link_dgpu_presence: ++ sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power"); ++err_link_dgpu_power: ++ gpiod_unexport(gpio_base_presence); ++err_export_base_presence: ++ gpiod_unexport(gpio_dgpu_presence); ++err_export_dgpu_presence: ++ gpiod_unexport(gpio_dgpu_power); ++err_out: ++ return status; ++} ++ ++static void shps_gpios_remove(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ ++ sysfs_remove_link(&pdev->dev.kobj, "gpio-base_presence"); ++ sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence"); ++ sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power"); ++ gpiod_unexport(drvdata->gpio_base_presence); ++ gpiod_unexport(drvdata->gpio_dgpu_presence); ++ gpiod_unexport(drvdata->gpio_dgpu_power); ++} ++ ++static int shps_gpios_setup_irq(struct platform_device *pdev) ++{ ++ const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ++ const int irqf_base = IRQF_SHARED; ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ status = gpiod_to_irq(drvdata->gpio_base_presence); ++ if (status < 0) ++ return status; ++ drvdata->irq_base_presence = status; ++ ++ status = gpiod_to_irq(drvdata->gpio_dgpu_presence); ++ if (status < 0) ++ return status; ++ drvdata->irq_dgpu_presence = status; ++ ++ status = 
request_irq(drvdata->irq_base_presence, ++ shps_base_presence_irq, irqf_base, ++ "shps_base_presence_irq", pdev); ++ if (status) { ++ dev_err(&pdev->dev, "base irq failed: %d\n", status); ++ return status; ++ } ++ ++ status = request_threaded_irq(drvdata->irq_dgpu_presence, ++ NULL, shps_dgpu_presence_irq, irqf_dgpu, ++ "shps_dgpu_presence_irq", pdev); ++ if (status) { ++ free_irq(drvdata->irq_base_presence, pdev); ++ return status; ++ } ++ ++ return 0; ++} ++ ++static void shps_gpios_remove_irq(struct platform_device *pdev) ++{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ ++ free_irq(drvdata->irq_base_presence, pdev); ++ free_irq(drvdata->irq_dgpu_presence, pdev); ++} ++ ++static void shps_sgcp_notify(acpi_handle device, u32 value, void *context) { ++ struct platform_device *pdev = context; ++ switch (value) { ++ case ACPI_SGCP_NOTIFY_POWER_ON: ++ shps_dgpu_powered_on(pdev); ++ } ++} ++ ++static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle) { ++ acpi_handle handle; ++ int status; ++ ++ status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle); ++ if (status) { ++ dev_err(&pdev->dev, "error in get_handle %d\n", status); ++ return status; ++ } ++ ++ status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev); ++ if (status) { ++ dev_err(&pdev->dev, "error in install notify %d\n", status); ++ *sgpc_handle = NULL; ++ return status; ++ } ++ ++ *sgpc_handle = handle; ++ return 0; ++} ++ ++static void shps_remove_sgcp_notification(struct platform_device *pdev) { ++ int status; ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ ++ if (drvdata->sgpc_handle) { ++ status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify); ++ if (status) { ++ dev_err(&pdev->dev, "failed to remove notify handler: %d\n", status); ++ } ++ } ++} ++ ++static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev) { ++ const struct shps_hardware_probe *p; ++ ++ for (p = shps_hardware_probe_match; p->hardware_id; ++p) { ++ if (acpi_dev_present(p->hardware_id, NULL, -1)) { ++ break; ++ } ++ } ++ ++ dev_info(&pdev->dev, ++ "shps_detect_hardware_traits found device %s, generation %d\n", ++ p->hardware_id ? p->hardware_id : "SAN (default)", ++ p->generation); ++ ++ return *p->hardware_traits; ++} ++ ++static int shps_probe(struct platform_device *pdev) ++{ ++ struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev); ++ struct shps_driver_data *drvdata; ++ struct ssam_controller *ctrl; ++ struct device_link *link; ++ int power, status; ++ struct shps_hardware_traits detected_traits; ++ ++ if (gpiod_count(&pdev->dev, NULL) < 0) { ++ dev_err(&pdev->dev, "gpiod_count returned < 0\n"); ++ return -ENODEV; ++ } ++ ++ // link to SSH ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) { ++ return status == -ENXIO ? -EPROBE_DEFER : status; ++ } ++ ++ // detect what kind of hardware we're running ++ detected_traits = shps_detect_hardware_traits(pdev); ++ ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ // link to SAN ++ status = surface_sam_san_consumer_register(&pdev->dev, 0); ++ if (status) { ++ dev_err(&pdev->dev, "failed to register with san consumer: %d\n", status); ++ return status == -ENXIO ? 
-EPROBE_DEFER : status; ++ } ++ } ++ ++ status = acpi_dev_add_driver_gpios(shps_dev, shps_acpi_gpios); ++ if (status) { ++ dev_err(&pdev->dev, "failed to add gpios: %d\n", status); ++ return status; ++ } ++ ++ drvdata = kzalloc(sizeof(struct shps_driver_data), GFP_KERNEL); ++ if (!drvdata) { ++ status = -ENOMEM; ++ goto err_drvdata; ++ } ++ mutex_init(&drvdata->lock); ++ platform_set_drvdata(pdev, drvdata); ++ ++ drvdata->ctrl = ctrl; ++ drvdata->hardware_traits = detected_traits; ++ ++ drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev); ++ if (IS_ERR(drvdata->dgpu_root_port)) { ++ status = PTR_ERR(drvdata->dgpu_root_port); ++ dev_err(&pdev->dev, "failed to get pci dev: %d\n", status); ++ goto err_rp_lookup; ++ } ++ ++ status = shps_gpios_setup(pdev); ++ if (status) { ++ dev_err(&pdev->dev, "unable to set up gpios, %d\n", status); ++ goto err_gpio; ++ } ++ ++ status = shps_gpios_setup_irq(pdev); ++ if (status) { ++ dev_err(&pdev->dev, "unable to set up irqs %d\n", status); ++ goto err_gpio_irqs; ++ } ++ ++ status = device_add_groups(&pdev->dev, shps_power_groups); ++ if (status) ++ goto err_devattr; ++ ++ link = device_link_add(&pdev->dev, &drvdata->dgpu_root_port->dev, ++ DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER); ++ if (!link) ++ goto err_devlink; ++ ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ status = surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev); ++ if (status) { ++ dev_err(&pdev->dev, "unable to set SAN notification handler (%d)\n", status); ++ goto err_devlink; ++ } ++ } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle); ++ if (status) { ++ dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status); ++ goto err_devlink; ++ } ++ } ++ ++ // if dGPU is not present turn-off root-port, else obey module param ++ status = shps_dgpu_is_present(pdev); ++ if (status < 0) ++ goto err_post_notification; ++ ++ power = status == 0 ? 
SHPS_DGPU_POWER_OFF : param_dgpu_power_init; ++ if (power != SHPS_DGPU_MP_POWER_ASIS) { ++ status = shps_dgpu_set_power(pdev, power); ++ if (status) ++ goto err_post_notification; ++ } ++ ++ // initialize power target ++ status = shps_dgpu_rp_get_power(pdev); ++ if (status < 0) ++ goto err_pwrtgt; ++ ++ if (status) ++ set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ else ++ clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state); ++ ++ device_init_wakeup(&pdev->dev, true); ++ return 0; ++ ++err_pwrtgt: ++ if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) { ++ status = shps_dgpu_set_power(pdev, param_dgpu_power_exit); ++ if (status) ++ dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status); ++ } ++err_post_notification: ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ shps_remove_sgcp_notification(pdev); ++ } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ surface_sam_san_set_rqsg_handler(NULL, NULL); ++ } ++err_devlink: ++ device_remove_groups(&pdev->dev, shps_power_groups); ++err_devattr: ++ shps_gpios_remove_irq(pdev); ++err_gpio_irqs: ++ shps_gpios_remove(pdev); ++err_gpio: ++ pci_dev_put(drvdata->dgpu_root_port); ++err_rp_lookup: ++ platform_set_drvdata(pdev, NULL); ++ kfree(drvdata); ++err_drvdata: ++ acpi_dev_remove_driver_gpios(shps_dev); ++ return status; ++} ++ ++static int shps_remove(struct platform_device *pdev) ++{ ++ struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev); ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ int status; ++ ++ if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) { ++ status = shps_dgpu_set_power(pdev, param_dgpu_power_exit); ++ if (status) ++ dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status); ++ } ++ ++ device_set_wakeup_capable(&pdev->dev, false); ++ ++ if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ shps_remove_sgcp_notification(pdev); ++ } else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ surface_sam_san_set_rqsg_handler(NULL, NULL); ++ } ++ device_remove_groups(&pdev->dev, shps_power_groups); ++ shps_gpios_remove_irq(pdev); ++ shps_gpios_remove(pdev); ++ pci_dev_put(drvdata->dgpu_root_port); ++ platform_set_drvdata(pdev, NULL); ++ kfree(drvdata); ++ ++ acpi_dev_remove_driver_gpios(shps_dev); ++ return 0; ++} ++ ++ ++static const struct dev_pm_ops shps_pm_ops = { ++ .prepare = shps_pm_prepare, ++ .complete = shps_pm_complete, ++ .suspend = shps_pm_suspend, ++ .resume = shps_pm_resume, ++}; ++ ++static const struct acpi_device_id shps_acpi_match[] = { ++ { "MSHW0153", 0 }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, shps_acpi_match); ++ ++static struct platform_driver surface_sam_hps = { ++ .probe = shps_probe, ++ .remove = shps_remove, ++ .shutdown = shps_shutdown, ++ .driver = { ++ .name = "surface_dgpu_hps", ++ .acpi_match_table = shps_acpi_match, ++ .pm = &shps_pm_ops, ++ }, ++}; ++ ++module_platform_driver(surface_sam_hps); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("Surface Hot-Plug System (HPS) and dGPU power-state Driver for Surface Book 2"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.c b/drivers/platform/x86/surface_sam/surface_sam_san.c +new file mode 100644 +index 0000000000000..eab4e178a8450 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_san.c +@@ -0,0 +1,930 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Surface ACPI Notify (SAN) and ACPI 
integration driver for SAM.
++ * Translates communication from ACPI to SSH and back.
++ */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++#include "surface_sam_ssh.h"
++#include "surface_sam_san.h"
++
++
++#define SAN_RQST_RETRY 5
++
++#define SAN_DSM_REVISION 0
++#define SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT 0x09
++
++static const guid_t SAN_DSM_UUID =
++ GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
++ 0x48, 0x7c, 0x91, 0xab, 0x3c);
++
++#define SAM_EVENT_DELAY_PWR_ADAPTER msecs_to_jiffies(5000)
++#define SAM_EVENT_DELAY_PWR_BST msecs_to_jiffies(2500)
++
++#define SAM_EVENT_PWR_CID_BIX 0x15
++#define SAM_EVENT_PWR_CID_BST 0x16
++#define SAM_EVENT_PWR_CID_ADAPTER 0x17
++#define SAM_EVENT_PWR_CID_DPTF 0x4f
++
++#define SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT 0x0b
++
++
++struct san_acpi_consumer {
++ char *path;
++ bool required;
++ u32 flags;
++};
++
++struct san_handler_data {
++ struct acpi_connection_info info; // must be first
++};
++
++struct san_consumer_link {
++ const struct san_acpi_consumer *properties;
++ struct device_link *link;
++};
++
++struct san_consumers {
++ u32 num;
++ struct san_consumer_link *links;
++};
++
++struct san_data {
++ struct device *dev;
++ struct ssam_controller *ctrl;
++
++ struct san_handler_data context;
++ struct san_consumers consumers;
++
++ struct ssam_event_notifier nf_bat;
++ struct ssam_event_notifier nf_tmp;
++};
++
++#define to_san_data(ptr, member) \
++ container_of(ptr, struct san_data, member)
++
++struct san_event_work {
++ struct delayed_work work;
++ struct device *dev;
++ struct ssam_event event; // must be last
++};
++
++struct gsb_data_in {
++ u8 cv;
++} __packed;
++
++struct gsb_data_rqsx {
++ u8 cv; // command value (should be 0x01 or 0x03)
++ u8 tc; // target controller
++ u8 tid; // transport channel ID
++ u8 iid; // target sub-controller (e.g. primary vs. secondary battery)
++ u8 snc; // expect-response-flag
++ u8 cid; // command ID
++ u16 cdl; // payload length
++ u8 pld[0]; // payload
++} __packed;
++
++struct gsb_data_etwl {
++ u8 cv; // command value (should be 0x02)
++ u8 etw3; // ?
++ u8 etw4; // ? 
++ u8 msg[0]; // error message (ASCIIZ) ++} __packed; ++ ++struct gsb_data_out { ++ u8 status; // _SSH communication status ++ u8 len; // _SSH payload length ++ u8 pld[0]; // _SSH payload ++} __packed; ++ ++union gsb_buffer_data { ++ struct gsb_data_in in; // common input ++ struct gsb_data_rqsx rqsx; // RQSX input ++ struct gsb_data_etwl etwl; // ETWL input ++ struct gsb_data_out out; // output ++}; ++ ++struct gsb_buffer { ++ u8 status; // GSB AttribRawProcess status ++ u8 len; // GSB AttribRawProcess length ++ union gsb_buffer_data data; ++} __packed; ++ ++#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx)) ++#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out)) ++ ++#define san_request_sync_onstack(ctrl, rqst, rsp) \ ++ ssam_request_sync_onstack(ctrl, rqst, rsp, SAN_GSB_MAX_RQSX_PAYLOAD) ++ ++ ++enum san_pwr_event { ++ SAN_PWR_EVENT_BAT1_STAT = 0x03, ++ SAN_PWR_EVENT_BAT1_INFO = 0x04, ++ SAN_PWR_EVENT_ADP1_STAT = 0x05, ++ SAN_PWR_EVENT_ADP1_INFO = 0x06, ++ SAN_PWR_EVENT_BAT2_STAT = 0x07, ++ SAN_PWR_EVENT_BAT2_INFO = 0x08, ++ SAN_PWR_EVENT_DPTF = 0x0A, ++}; ++ ++ ++static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data); ++ ++struct sam_san_rqsg_if { ++ struct mutex lock; ++ struct device *san_dev; ++ surface_sam_san_rqsg_handler_fn handler; ++ void *handler_data; ++}; ++ ++static struct sam_san_rqsg_if rqsg_if = { ++ .lock = __MUTEX_INITIALIZER(rqsg_if.lock), ++ .san_dev = NULL, ++ .handler = sam_san_default_rqsg_handler, ++ .handler_data = NULL, ++}; ++ ++int surface_sam_san_consumer_register(struct device *consumer, u32 flags) ++{ ++ const u32 valid = DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE; ++ int status; ++ ++ if ((flags | valid) != valid) ++ return -EINVAL; ++ ++ flags |= DL_FLAG_AUTOREMOVE_CONSUMER; ++ ++ mutex_lock(&rqsg_if.lock); ++ if (rqsg_if.san_dev) ++ status = device_link_add(consumer, rqsg_if.san_dev, flags) ? 0 : -EINVAL; ++ else ++ status = -ENXIO; ++ mutex_unlock(&rqsg_if.lock); ++ return status; ++} ++EXPORT_SYMBOL_GPL(surface_sam_san_consumer_register); ++ ++int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data) ++{ ++ int status = -EBUSY; ++ ++ mutex_lock(&rqsg_if.lock); ++ ++ if (rqsg_if.handler == sam_san_default_rqsg_handler || !fn) { ++ rqsg_if.handler = fn ? fn : sam_san_default_rqsg_handler; ++ rqsg_if.handler_data = fn ? 
data : NULL;
++ status = 0;
++ }
++
++ mutex_unlock(&rqsg_if.lock);
++ return status;
++}
++EXPORT_SYMBOL_GPL(surface_sam_san_set_rqsg_handler);
++
++int san_call_rqsg_handler(struct surface_sam_san_rqsg *rqsg)
++{
++ int status;
++
++ mutex_lock(&rqsg_if.lock);
++ status = rqsg_if.handler(rqsg, rqsg_if.handler_data);
++ mutex_unlock(&rqsg_if.lock);
++
++ return status;
++}
++
++static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data)
++{
++ struct device *dev = rqsg_if.san_dev;
++
++ dev_warn(dev, "unhandled request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
++ rqsg->tc, rqsg->cid, rqsg->iid);
++
++ return 0;
++}
++
++
++static bool san_acpi_can_notify(struct device *dev, u64 func)
++{
++ acpi_handle san = ACPI_HANDLE(dev);
++ return acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func);
++}
++
++static int san_acpi_notify_power_event(struct device *dev, enum san_pwr_event event)
++{
++ acpi_handle san = ACPI_HANDLE(dev);
++ union acpi_object *obj;
++
++ if (!san_acpi_can_notify(dev, event))
++ return 0;
++
++ dev_dbg(dev, "notify power event 0x%02x\n", event);
++ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
++ event, NULL, ACPI_TYPE_BUFFER);
++
++ if (IS_ERR_OR_NULL(obj))
++ return obj ? PTR_ERR(obj) : -ENXIO;
++
++ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
++ dev_err(dev, "got unexpected result from _DSM\n");
++ return -EFAULT;
++ }
++
++ ACPI_FREE(obj);
++ return 0;
++}
++
++static int san_acpi_notify_sensor_trip_point(struct device *dev, u8 iid)
++{
++ acpi_handle san = ACPI_HANDLE(dev);
++ union acpi_object *obj;
++ union acpi_object param;
++
++ if (!san_acpi_can_notify(dev, SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT))
++ return 0;
++
++ param.type = ACPI_TYPE_INTEGER;
++ param.integer.value = iid;
++
++ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
++ SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT,
++ &param, ACPI_TYPE_BUFFER);
++
++ if (IS_ERR_OR_NULL(obj))
++ return obj ? PTR_ERR(obj) : -ENXIO;
++
++ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
++ dev_err(dev, "got unexpected result from _DSM\n");
++ return -EFAULT;
++ }
++
++ ACPI_FREE(obj);
++ return 0;
++}
++
++
++static inline int san_evt_power_adapter(struct device *dev, const struct ssam_event *event)
++{
++ int status;
++
++ status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_ADP1_STAT);
++ if (status)
++ return status;
++
++ /*
++ * Ensure that the battery states get updated correctly. When the
++ * battery is fully charged and an adapter is plugged in, its state is
++ * sometimes not updated correctly and shown as charging instead.
++ * Explicitly trigger battery updates to fix this. 
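++ * Both battery notifications are issued unconditionally here; on
++ * devices without a second battery, san_acpi_can_notify() filters the
++ * BAT2 notification out before the _DSM call is made.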
++ */ ++ ++ status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT1_STAT); ++ if (status) ++ return status; ++ ++ return san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT); ++} ++ ++static inline int san_evt_power_bix(struct device *dev, const struct ssam_event *event) ++{ ++ enum san_pwr_event evcode; ++ ++ if (event->instance_id == 0x02) ++ evcode = SAN_PWR_EVENT_BAT2_INFO; ++ else ++ evcode = SAN_PWR_EVENT_BAT1_INFO; ++ ++ return san_acpi_notify_power_event(dev, evcode); ++} ++ ++static inline int san_evt_power_bst(struct device *dev, const struct ssam_event *event) ++{ ++ enum san_pwr_event evcode; ++ ++ if (event->instance_id == 0x02) ++ evcode = SAN_PWR_EVENT_BAT2_STAT; ++ else ++ evcode = SAN_PWR_EVENT_BAT1_STAT; ++ ++ return san_acpi_notify_power_event(dev, evcode); ++} ++ ++static inline int san_evt_power_dptf(struct device *dev, const struct ssam_event *event) ++{ ++ union acpi_object payload; ++ acpi_handle san = ACPI_HANDLE(dev); ++ union acpi_object *obj; ++ ++ if (!san_acpi_can_notify(dev, SAN_PWR_EVENT_DPTF)) ++ return 0; ++ ++ /* ++ * The Surface ACPI expects a buffer and not a package. It specifically ++ * checks for ObjectType (Arg3) == 0x03. This will cause a warning in ++ * acpica/nsarguments.c, but this can safely be ignored. ++ */ ++ payload.type = ACPI_TYPE_BUFFER; ++ payload.buffer.length = event->length; ++ payload.buffer.pointer = (u8 *)&event->data[0]; ++ ++ dev_dbg(dev, "notify power event 0x%02x\n", event->command_id); ++ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION, ++ SAN_PWR_EVENT_DPTF, &payload, ++ ACPI_TYPE_BUFFER); ++ ++ if (IS_ERR_OR_NULL(obj)) ++ return obj ? PTR_ERR(obj) : -ENXIO; ++ ++ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) { ++ dev_err(dev, "got unexpected result from _DSM\n"); ++ return -EFAULT; ++ } ++ ++ ACPI_FREE(obj); ++ return 0; ++} ++ ++static unsigned long san_evt_power_delay(u8 cid) ++{ ++ switch (cid) { ++ case SAM_EVENT_PWR_CID_ADAPTER: ++ /* ++ * Wait for battery state to update before signalling adapter change. ++ */ ++ return SAM_EVENT_DELAY_PWR_ADAPTER; ++ ++ case SAM_EVENT_PWR_CID_BST: ++ /* ++ * Ensure we do not miss anything important due to caching. 
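++ * The 2.5 s delay (SAM_EVENT_DELAY_PWR_BST) should give any cached
++ * battery state time to expire, so that ACPI re-queries fresh data.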
++ */ ++ return SAM_EVENT_DELAY_PWR_BST; ++ ++ case SAM_EVENT_PWR_CID_BIX: ++ case SAM_EVENT_PWR_CID_DPTF: ++ default: ++ return 0; ++ } ++} ++ ++static bool san_evt_power(const struct ssam_event *event, struct device *dev) ++{ ++ int status; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_PWR_CID_BIX: ++ status = san_evt_power_bix(dev, event); ++ break; ++ ++ case SAM_EVENT_PWR_CID_BST: ++ status = san_evt_power_bst(dev, event); ++ break; ++ ++ case SAM_EVENT_PWR_CID_ADAPTER: ++ status = san_evt_power_adapter(dev, event); ++ break; ++ ++ case SAM_EVENT_PWR_CID_DPTF: ++ status = san_evt_power_dptf(dev, event); ++ break; ++ ++ default: ++ return false; ++ } ++ ++ if (status) ++ dev_err(dev, "error handling power event (cid = %x)\n", ++ event->command_id); ++ ++ return true; ++} ++ ++static void san_evt_power_workfn(struct work_struct *work) ++{ ++ struct san_event_work *ev = container_of(work, struct san_event_work, work.work); ++ ++ san_evt_power(&ev->event, ev->dev); ++ kfree(ev); ++} ++ ++ ++static u32 san_evt_power_nb(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct san_data *d = to_san_data(nb, nf_bat.base); ++ struct san_event_work *work; ++ unsigned long delay = san_evt_power_delay(event->command_id); ++ ++ if (delay == 0) { ++ if (san_evt_power(event, d->dev)) ++ return SSAM_NOTIF_HANDLED; ++ else ++ return 0; ++ } ++ ++ work = kzalloc(sizeof(struct san_event_work) + event->length, GFP_KERNEL); ++ if (!work) ++ return ssam_notifier_from_errno(-ENOMEM); ++ ++ INIT_DELAYED_WORK(&work->work, san_evt_power_workfn); ++ work->dev = d->dev; ++ ++ memcpy(&work->event, event, sizeof(struct ssam_event) + event->length); ++ ++ schedule_delayed_work(&work->work, delay); ++ return SSAM_NOTIF_HANDLED; ++} ++ ++ ++static inline int san_evt_thermal_notify(struct device *dev, const struct ssam_event *event) ++{ ++ return san_acpi_notify_sensor_trip_point(dev, event->instance_id); ++} ++ ++static bool san_evt_thermal(const struct ssam_event *event, struct device *dev) ++{ ++ int status; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT: ++ status = san_evt_thermal_notify(dev, event); ++ break; ++ ++ default: ++ return false; ++ } ++ ++ if (status) { ++ dev_err(dev, "error handling thermal event (cid = %x)\n", ++ event->command_id); ++ } ++ ++ return true; ++} ++ ++static u32 san_evt_thermal_nb(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ if (san_evt_thermal(event, to_san_data(nb, nf_tmp.base)->dev)) ++ return SSAM_NOTIF_HANDLED; ++ else ++ return 0; ++} ++ ++ ++static struct gsb_data_rqsx ++*san_validate_rqsx(struct device *dev, const char *type, struct gsb_buffer *buffer) ++{ ++ struct gsb_data_rqsx *rqsx = &buffer->data.rqsx; ++ ++ if (buffer->len < 8) { ++ dev_err(dev, "invalid %s package (len = %d)\n", ++ type, buffer->len); ++ return NULL; ++ } ++ ++ if (get_unaligned(&rqsx->cdl) != buffer->len - sizeof(struct gsb_data_rqsx)) { ++ dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n", ++ type, buffer->len, get_unaligned(&rqsx->cdl)); ++ return NULL; ++ } ++ ++ if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) { ++ dev_err(dev, "payload for %s package too large (cdl = %d)\n", ++ type, get_unaligned(&rqsx->cdl)); ++ return NULL; ++ } ++ ++ if (rqsx->tid != 0x01) { ++ dev_warn(dev, "unsupported %s package (tid = 0x%02x)\n", ++ type, rqsx->tid); ++ return NULL; ++ } ++ ++ return rqsx; ++} ++ ++static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *buffer) ++{ ++ struct gsb_data_etwl 
*etwl = &buffer->data.etwl; ++ ++ if (buffer->len < 3) { ++ dev_err(d->dev, "invalid ETWL package (len = %d)\n", buffer->len); ++ return AE_OK; ++ } ++ ++ dev_err(d->dev, "ETWL(0x%02x, 0x%02x): %.*s\n", ++ etwl->etw3, etwl->etw4, ++ buffer->len - 3, (char *)etwl->msg); ++ ++ // indicate success ++ buffer->status = 0x00; ++ buffer->len = 0x00; ++ ++ return AE_OK; ++} ++ ++static void gsb_response_error(struct gsb_buffer *gsb, int status) ++{ ++ gsb->status = 0x00; ++ gsb->len = 0x02; ++ gsb->data.out.status = (u8)(-status); ++ gsb->data.out.len = 0x00; ++} ++ ++static void gsb_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len) ++{ ++ gsb->status = 0x00; ++ gsb->len = len + 2; ++ gsb->data.out.status = 0x00; ++ gsb->data.out.len = len; ++ ++ if (len) ++ memcpy(&gsb->data.out.pld[0], ptr, len); ++} ++ ++static acpi_status san_rqst_fixup_suspended(struct ssam_request *rqst, ++ struct gsb_buffer *gsb) ++{ ++ if (rqst->target_category == 0x11 && rqst->command_id == 0x0D) { ++ /* Base state quirk: ++ * The base state may be queried from ACPI when the EC is still ++ * suspended. In this case it will return '-EPERM'. This query ++ * will only be triggered from the ACPI lid GPE interrupt, thus ++ * we are either in laptop or studio mode (base status 0x01 or ++ * 0x02). Furthermore, we will only get here if the device (and ++ * EC) have been suspended. ++ * ++ * We now assume that the device is in laptop mode (0x01). This ++ * has the drawback that it will wake the device when unfolding ++ * it in studio mode, but it also allows us to avoid actively ++ * waiting for the EC to wake up, which may incur a notable ++ * delay. ++ */ ++ ++ u8 base_state = 1; ++ gsb_response_success(gsb, &base_state, 1); ++ return AE_OK; ++ } ++ ++ gsb_response_error(gsb, -ENXIO); ++ return AE_OK; ++} ++ ++static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer) ++{ ++ u8 rspbuf[SAN_GSB_MAX_RESPONSE]; ++ struct gsb_data_rqsx *gsb_rqst; ++ struct ssam_request rqst; ++ struct ssam_response rsp; ++ int status = 0; ++ int try; ++ ++ gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer); ++ if (!gsb_rqst) ++ return AE_OK; ++ ++ rqst.target_category = gsb_rqst->tc; ++ rqst.command_id = gsb_rqst->cid; ++ rqst.instance_id = gsb_rqst->iid; ++ rqst.channel = gsb_rqst->tid; ++ rqst.flags = gsb_rqst->snc ? 
SSAM_REQUEST_HAS_RESPONSE : 0;
++ rqst.length = get_unaligned(&gsb_rqst->cdl);
++ rqst.payload = &gsb_rqst->pld[0];
++
++ rsp.capacity = ARRAY_SIZE(rspbuf);
++ rsp.length = 0;
++ rsp.pointer = &rspbuf[0];
++
++ // handle suspended device
++ if (d->dev->power.is_suspended) {
++ dev_warn(d->dev, "rqst: device is suspended, not executing\n");
++ return san_rqst_fixup_suspended(&rqst, buffer);
++ }
++
++ for (try = 0; try < SAN_RQST_RETRY; try++) {
++ if (try)
++ dev_warn(d->dev, "rqst: IO error, trying again\n");
++
++ status = san_request_sync_onstack(d->ctrl, &rqst, &rsp);
++ if (status != -ETIMEDOUT && status != -EREMOTEIO)
++ break;
++ }
++
++ if (!status) {
++ gsb_response_success(buffer, rsp.pointer, rsp.length);
++ } else {
++ dev_err(d->dev, "rqst: failed with error %d\n", status);
++ gsb_response_error(buffer, status);
++ }
++
++ return AE_OK;
++}
++
++static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
++{
++ struct gsb_data_rqsx *gsb_rqsg;
++ struct surface_sam_san_rqsg rqsg;
++ int status;
++
++ gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
++ if (!gsb_rqsg)
++ return AE_OK;
++
++ rqsg.tc = gsb_rqsg->tc;
++ rqsg.cid = gsb_rqsg->cid;
++ rqsg.iid = gsb_rqsg->iid;
++ rqsg.cdl = get_unaligned(&gsb_rqsg->cdl);
++ rqsg.pld = &gsb_rqsg->pld[0];
++
++ status = san_call_rqsg_handler(&rqsg);
++ if (!status) {
++ gsb_response_success(buffer, NULL, 0);
++ } else {
++ dev_err(d->dev, "rqsg: failed with error %d\n", status);
++ gsb_response_error(buffer, status);
++ }
++
++ return AE_OK;
++}
++
++
++static acpi_status
++san_opreg_handler(u32 function, acpi_physical_address command,
++ u32 bits, u64 *value64,
++ void *opreg_context, void *region_context)
++{
++ struct san_data *d = to_san_data(opreg_context, context);
++ struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
++ int accessor_type = (0xFFFF0000 & function) >> 16;
++
++ if (command != 0) {
++ dev_warn(d->dev, "unsupported command: 0x%02llx\n", command);
++ return AE_OK;
++ }
++
++ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
++ dev_err(d->dev, "invalid access type: 0x%02x\n", accessor_type);
++ return AE_OK;
++ }
++
++ // buffer must at least contain the command value
++ if (buffer->len == 0) {
++ dev_err(d->dev, "request-package too small\n");
++ return AE_OK;
++ }
++
++ switch (buffer->data.in.cv) {
++ case 0x01: return san_rqst(d, buffer);
++ case 0x02: return san_etwl(d, buffer);
++ case 0x03: return san_rqsg(d, buffer);
++ }
++
++ dev_warn(d->dev, "unsupported SAN0 request (cv: 0x%02x)\n", buffer->data.in.cv);
++ return AE_OK;
++}
++
++static int san_events_register(struct platform_device *pdev)
++{
++ struct san_data *d = platform_get_drvdata(pdev);
++ int status;
++
++ d->nf_bat.base.priority = 1;
++ d->nf_bat.base.fn = san_evt_power_nb;
++ d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
++ d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
++ d->nf_bat.event.id.instance = 0;
++ d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
++
++ d->nf_tmp.base.priority = 1;
++ d->nf_tmp.base.fn = san_evt_thermal_nb;
++ d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
++ d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
++ d->nf_tmp.event.id.instance = 0;
++ d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
++
++ status = ssam_notifier_register(d->ctrl, &d->nf_bat);
++ if (status)
++ return status;
++
++ status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
++ if (status)
++ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
++
++ return status;
++}
++
++static void 
san_events_unregister(struct platform_device *pdev) ++{ ++ struct san_data *d = platform_get_drvdata(pdev); ++ ++ ssam_notifier_unregister(d->ctrl, &d->nf_bat); ++ ssam_notifier_unregister(d->ctrl, &d->nf_tmp); ++} ++ ++ ++static int san_consumers_link(struct platform_device *pdev, ++ const struct san_acpi_consumer *cons, ++ struct san_consumers *out) ++{ ++ const struct san_acpi_consumer *con; ++ struct san_consumer_link *links, *link; ++ struct acpi_device *adev; ++ acpi_handle handle; ++ u32 max_links = 0; ++ int status; ++ ++ if (!cons) ++ return 0; ++ ++ // count links ++ for (con = cons; con->path; ++con) ++ max_links += 1; ++ ++ // allocate ++ links = kcalloc(max_links, sizeof(struct san_consumer_link), GFP_KERNEL); ++ link = &links[0]; ++ ++ if (!links) ++ return -ENOMEM; ++ ++ // create links ++ for (con = cons; con->path; ++con) { ++ status = acpi_get_handle(NULL, con->path, &handle); ++ if (status) { ++ if (con->required || status != AE_NOT_FOUND) { ++ status = -ENXIO; ++ goto cleanup; ++ } else { ++ continue; ++ } ++ } ++ ++ status = acpi_bus_get_device(handle, &adev); ++ if (status) ++ goto cleanup; ++ ++ link->link = device_link_add(&adev->dev, &pdev->dev, con->flags); ++ if (!(link->link)) { ++ status = -EFAULT; ++ goto cleanup; ++ } ++ link->properties = con; ++ ++ link += 1; ++ } ++ ++ out->num = link - links; ++ out->links = links; ++ ++ return 0; ++ ++cleanup: ++ for (link = link - 1; link >= links; --link) { ++ if (link->properties->flags & DL_FLAG_STATELESS) ++ device_link_del(link->link); ++ } ++ ++ return status; ++} ++ ++static void san_consumers_unlink(struct san_consumers *consumers) ++{ ++ u32 i; ++ ++ if (!consumers) ++ return; ++ ++ for (i = 0; i < consumers->num; ++i) { ++ if (consumers->links[i].properties->flags & DL_FLAG_STATELESS) ++ device_link_del(consumers->links[i].link); ++ } ++ ++ kfree(consumers->links); ++ ++ consumers->num = 0; ++ consumers->links = NULL; ++} ++ ++static int surface_sam_san_probe(struct platform_device *pdev) ++{ ++ const struct san_acpi_consumer *cons; ++ acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node ++ struct ssam_controller *ctrl; ++ struct san_data *data; ++ int status; ++ ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) ++ return status == -ENXIO ? 
-EPROBE_DEFER : status;
++
++ data = kzalloc(sizeof(struct san_data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->dev = &pdev->dev;
++ data->ctrl = ctrl;
++
++ cons = acpi_device_get_match_data(&pdev->dev);
++ status = san_consumers_link(pdev, cons, &data->consumers);
++ if (status)
++ goto err_consumers;
++
++ platform_set_drvdata(pdev, data);
++
++ status = acpi_install_address_space_handler(san,
++ ACPI_ADR_SPACE_GSBUS,
++ &san_opreg_handler,
++ NULL, &data->context);
++
++ if (ACPI_FAILURE(status)) {
++ status = -ENODEV;
++ goto err_install_handler;
++ }
++
++ status = san_events_register(pdev);
++ if (status)
++ goto err_enable_events;
++
++ mutex_lock(&rqsg_if.lock);
++ if (!rqsg_if.san_dev)
++ rqsg_if.san_dev = &pdev->dev;
++ else
++ status = -EBUSY;
++ mutex_unlock(&rqsg_if.lock);
++
++ if (status)
++ goto err_install_dev;
++
++ acpi_walk_dep_device_list(san);
++ return 0;
++
++err_install_dev:
++ san_events_unregister(pdev);
++err_enable_events:
++ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
++err_install_handler:
++ platform_set_drvdata(pdev, NULL);
++ san_consumers_unlink(&data->consumers);
++err_consumers:
++ kfree(data);
++ return status;
++}
++
++static int surface_sam_san_remove(struct platform_device *pdev)
++{
++ struct san_data *data = platform_get_drvdata(pdev);
++ acpi_handle san = ACPI_HANDLE(&pdev->dev); // _SAN device node
++ acpi_status status = AE_OK;
++
++ mutex_lock(&rqsg_if.lock);
++ rqsg_if.san_dev = NULL;
++ mutex_unlock(&rqsg_if.lock);
++
++ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
++ san_events_unregister(pdev);
++
++ /*
++ * We have unregistered our event sources. Now we need to ensure that
++ * all delayed works they may have spawned are run to completion.
++ */
++ flush_scheduled_work();
++
++ san_consumers_unlink(&data->consumers);
++ kfree(data);
++
++ platform_set_drvdata(pdev, NULL);
++ return status;
++}
++
++
++static const struct san_acpi_consumer san_mshw0091_consumers[] = {
++ { "\\_SB.SRTC", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
++ { "\\ADP1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
++ { "\\_SB.BAT1", true, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
++ { "\\_SB.BAT2", false, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
++ { },
++};
++
++static const struct acpi_device_id surface_sam_san_match[] = {
++ { "MSHW0091", (unsigned long) san_mshw0091_consumers },
++ { },
++};
++MODULE_DEVICE_TABLE(acpi, surface_sam_san_match);
++
++static struct platform_driver surface_sam_san = {
++ .probe = surface_sam_san_probe,
++ .remove = surface_sam_san_remove,
++ .driver = {
++ .name = "surface_sam_san",
++ .acpi_match_table = surface_sam_san_match,
++ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
++ },
++};
++module_platform_driver(surface_sam_san);
++
++MODULE_AUTHOR("Maximilian Luz ");
++MODULE_DESCRIPTION("Surface ACPI Notify Driver for 5th Generation Surface Devices");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.h b/drivers/platform/x86/surface_sam/surface_sam_san.h
+new file mode 100644
+index 0000000000000..3408dde964b3c
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_san.h
+@@ -0,0 +1,30 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Interface for Surface ACPI/Notify (SAN).
++ *
++ * The SAN is the main interface between the Surface Serial Hub (SSH) and the
++ * Surface/System Aggregator Module (SAM). It allows requests to be translated
++ * from ACPI to SSH/SAM. 
It also interfaces with the discrete GPU hot-plug ++ * driver. ++ */ ++ ++#ifndef _SURFACE_SAM_SAN_H ++#define _SURFACE_SAM_SAN_H ++ ++#include ++ ++ ++struct surface_sam_san_rqsg { ++ u8 tc; // target category ++ u8 cid; // command ID ++ u8 iid; // instance ID ++ u16 cdl; // command data length (length of payload) ++ u8 *pld; // pointer to payload of length cdl ++}; ++ ++typedef int (*surface_sam_san_rqsg_handler_fn)(struct surface_sam_san_rqsg *rqsg, void *data); ++ ++int surface_sam_san_consumer_register(struct device *consumer, u32 flags); ++int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data); ++ ++#endif /* _SURFACE_SAM_SAN_H */ +diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid.c b/drivers/platform/x86/surface_sam/surface_sam_sid.c +new file mode 100644 +index 0000000000000..bcf9a569ee719 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_sid.c +@@ -0,0 +1,283 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Surface Integration Driver. ++ * MFD driver to provide device/model dependent functionality. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_sid_power.h" ++#include "surface_sam_sid_vhf.h" ++ ++ ++static const struct ssam_battery_properties ssam_battery_props_bat1 = { ++ .registry = SSAM_EVENT_REGISTRY_SAM, ++ .num = 0, ++ .channel = 1, ++ .instance = 1, ++}; ++ ++static const struct ssam_battery_properties ssam_battery_props_bat2_sb3 = { ++ .registry = SSAM_EVENT_REGISTRY_KIP, ++ .num = 1, ++ .channel = 2, ++ .instance = 1, ++}; ++ ++ ++static const struct ssam_hid_properties ssam_hid_props_keyboard = { ++ .registry = SSAM_EVENT_REGISTRY_REG, ++ .instance = 1, ++}; ++ ++static const struct ssam_hid_properties ssam_hid_props_touchpad = { ++ .registry = SSAM_EVENT_REGISTRY_REG, ++ .instance = 3, ++}; ++ ++static const struct ssam_hid_properties ssam_hid_props_iid5 = { ++ .registry = SSAM_EVENT_REGISTRY_REG, ++ .instance = 5, ++}; ++ ++static const struct ssam_hid_properties ssam_hid_props_iid6 = { ++ .registry = SSAM_EVENT_REGISTRY_REG, ++ .instance = 6, ++}; ++ ++ ++static const struct mfd_cell sid_devs_sp4[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sp6[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sp7[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { .name = "surface_sam_sid_ac", .id = -1 }, ++ { ++ .name = "surface_sam_sid_battery", ++ .id = -1, ++ .platform_data = (void *)&ssam_battery_props_bat1, ++ .pdata_size = sizeof(struct ssam_battery_properties), ++ }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sb1[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sb2[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sb3[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { .name = "surface_sam_sid_ac", .id = -1 }, ++ { ++ .name = "surface_sam_sid_battery", ++ .id = 1, ++ .platform_data = (void *)&ssam_battery_props_bat1, ++ .pdata_size = sizeof(struct ssam_battery_properties), ++ }, ++ { ++ .name = 
"surface_sam_sid_battery", ++ .id = 2, ++ .platform_data = (void *)&ssam_battery_props_bat2_sb3, ++ .pdata_size = sizeof(struct ssam_battery_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 1, ++ .platform_data = (void *)&ssam_hid_props_keyboard, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 3, ++ .platform_data = (void *)&ssam_hid_props_touchpad, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 5, ++ .platform_data = (void *)&ssam_hid_props_iid5, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 6, ++ .platform_data = (void *)&ssam_hid_props_iid6, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sl1[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sl2[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sl3_13[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { .name = "surface_sam_sid_ac", .id = -1 }, ++ { ++ .name = "surface_sam_sid_battery", ++ .id = -1, ++ .platform_data = (void *)&ssam_battery_props_bat1, ++ .pdata_size = sizeof(struct ssam_battery_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 1, ++ .platform_data = (void *)&ssam_hid_props_keyboard, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 3, ++ .platform_data = (void *)&ssam_hid_props_touchpad, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 5, ++ .platform_data = (void *)&ssam_hid_props_iid5, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { }, ++}; ++ ++static const struct mfd_cell sid_devs_sl3_15[] = { ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { .name = "surface_sam_sid_ac", .id = -1 }, ++ { ++ .name = "surface_sam_sid_battery", ++ .id = -1, ++ .platform_data = (void *)&ssam_battery_props_bat1, ++ .pdata_size = sizeof(struct ssam_battery_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 1, ++ .platform_data = (void *)&ssam_hid_props_keyboard, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 3, ++ .platform_data = (void *)&ssam_hid_props_touchpad, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { ++ .name = "surface_sam_sid_vhf", ++ .id = 5, ++ .platform_data = (void *)&ssam_hid_props_iid5, ++ .pdata_size = sizeof(struct ssam_hid_properties), ++ }, ++ { }, ++}; ++ ++static const struct acpi_device_id surface_sam_sid_match[] = { ++ /* Surface Pro 4, 5, and 6 */ ++ { "MSHW0081", (unsigned long)sid_devs_sp4 }, ++ ++ /* Surface Pro 6 (OMBR >= 0x10) */ ++ { "MSHW0111", (unsigned long)sid_devs_sp6 }, ++ ++ /* Surface Pro 7 */ ++ { "MSHW0116", (unsigned long)sid_devs_sp7 }, ++ ++ /* Surface Book 1 */ ++ { "MSHW0080", (unsigned long)sid_devs_sb1 }, ++ ++ /* Surface Book 2 */ ++ { "MSHW0107", (unsigned long)sid_devs_sb2 }, ++ ++ /* Surface Book 3 */ ++ { "MSHW0117", (unsigned long)sid_devs_sb3 }, ++ ++ /* Surface Laptop 1 */ ++ { "MSHW0086", (unsigned long)sid_devs_sl1 }, ++ ++ /* Surface Laptop 2 */ ++ { "MSHW0112", (unsigned 
long)sid_devs_sl2 }, ++ ++ /* Surface Laptop 3 (13") */ ++ { "MSHW0114", (unsigned long)sid_devs_sl3_13 }, ++ ++ /* Surface Laptop 3 (15") */ ++ { "MSHW0110", (unsigned long)sid_devs_sl3_15 }, ++ ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, surface_sam_sid_match); ++ ++ ++static int surface_sam_sid_probe(struct platform_device *pdev) ++{ ++ const struct acpi_device_id *match; ++ const struct mfd_cell *cells, *p; ++ ++ match = acpi_match_device(surface_sam_sid_match, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ cells = (struct mfd_cell *)match->driver_data; ++ if (!cells) ++ return -ENODEV; ++ ++ for (p = cells; p->name; ++p) { ++ /* just count */ ++ } ++ ++ if (p == cells) ++ return -ENODEV; ++ ++ return mfd_add_devices(&pdev->dev, 0, cells, p - cells, NULL, 0, NULL); ++} ++ ++static int surface_sam_sid_remove(struct platform_device *pdev) ++{ ++ mfd_remove_devices(&pdev->dev); ++ return 0; ++} ++ ++static struct platform_driver surface_sam_sid = { ++ .probe = surface_sam_sid_probe, ++ .remove = surface_sam_sid_remove, ++ .driver = { ++ .name = "surface_sam_sid", ++ .acpi_match_table = surface_sam_sid_match, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++module_platform_driver(surface_sam_sid); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("Surface Integration Driver for 5th Generation Surface Devices"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c +new file mode 100644 +index 0000000000000..f0cee43c859b4 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c +@@ -0,0 +1,232 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Surface Lid driver to enable wakeup from suspend via the lid. 
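++ * The lid GPE of the specific model is looked up via DMI. It is armed as
++ * a wake source (acpi_set_gpe_wake_mask) on suspend and disarmed again on
++ * resume.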
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++ ++struct sid_lid_device { ++ const char *acpi_path; ++ const u32 gpe_number; ++}; ++ ++ ++static const struct sid_lid_device lid_device_l17 = { ++ .acpi_path = "\\_SB.LID0", ++ .gpe_number = 0x17, ++}; ++ ++static const struct sid_lid_device lid_device_l4D = { ++ .acpi_path = "\\_SB.LID0", ++ .gpe_number = 0x4D, ++}; ++ ++static const struct sid_lid_device lid_device_l4F = { ++ .acpi_path = "\\_SB.LID0", ++ .gpe_number = 0x4F, ++}; ++ ++static const struct sid_lid_device lid_device_l57 = { ++ .acpi_path = "\\_SB.LID0", ++ .gpe_number = 0x57, ++}; ++ ++ ++static const struct dmi_system_id dmi_lid_device_table[] = { ++ { ++ .ident = "Surface Pro 4", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"), ++ }, ++ .driver_data = (void *)&lid_device_l17, ++ }, ++ { ++ .ident = "Surface Pro 5", ++ .matches = { ++ /* match for SKU here due to generic product name "Surface Pro" */ ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"), ++ }, ++ .driver_data = (void *)&lid_device_l4F, ++ }, ++ { ++ .ident = "Surface Pro 5 (LTE)", ++ .matches = { ++ /* match for SKU here due to generic product name "Surface Pro" */ ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"), ++ }, ++ .driver_data = (void *)&lid_device_l4F, ++ }, ++ { ++ .ident = "Surface Pro 6", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"), ++ }, ++ .driver_data = (void *)&lid_device_l4F, ++ }, ++ { ++ .ident = "Surface Pro 7", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"), ++ }, ++ .driver_data = (void *)&lid_device_l4D, ++ }, ++ { ++ .ident = "Surface Book 1", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"), ++ }, ++ .driver_data = (void *)&lid_device_l17, ++ }, ++ { ++ .ident = "Surface Book 2", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"), ++ }, ++ .driver_data = (void *)&lid_device_l17, ++ }, ++ { ++ .ident = "Surface Book 3", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"), ++ }, ++ .driver_data = (void *)&lid_device_l4D, ++ }, ++ { ++ .ident = "Surface Laptop 1", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"), ++ }, ++ .driver_data = (void *)&lid_device_l57, ++ }, ++ { ++ .ident = "Surface Laptop 2", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"), ++ }, ++ .driver_data = (void *)&lid_device_l57, ++ }, ++ { ++ .ident = "Surface Laptop 3 (13\")", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"), ++ }, ++ .driver_data = (void *)&lid_device_l4D, ++ }, ++ { } ++}; ++ ++ ++static int sid_lid_enable_wakeup(const struct sid_lid_device *dev, bool enable) ++{ ++ int action = enable ? 
ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
++ int status;
++
++ status = acpi_set_gpe_wake_mask(NULL, dev->gpe_number, action);
++ if (status)
++ return -EFAULT;
++
++ return 0;
++}
++
++
++static int surface_sam_sid_gpelid_suspend(struct device *dev)
++{
++ const struct sid_lid_device *ldev;
++
++ ldev = dev_get_drvdata(dev);
++ return sid_lid_enable_wakeup(ldev, true);
++}
++
++static int surface_sam_sid_gpelid_resume(struct device *dev)
++{
++ const struct sid_lid_device *ldev;
++
++ ldev = dev_get_drvdata(dev);
++ return sid_lid_enable_wakeup(ldev, false);
++}
++
++static SIMPLE_DEV_PM_OPS(surface_sam_sid_gpelid_pm,
++ surface_sam_sid_gpelid_suspend,
++ surface_sam_sid_gpelid_resume);
++
++
++static int surface_sam_sid_gpelid_probe(struct platform_device *pdev)
++{
++ const struct dmi_system_id *match;
++ struct sid_lid_device *dev;
++ acpi_handle lid_handle;
++ int status;
++
++ match = dmi_first_match(dmi_lid_device_table);
++ if (!match)
++ return -ENODEV;
++
++ dev = match->driver_data;
++ if (!dev)
++ return -ENODEV;
++
++ status = acpi_get_handle(NULL, (acpi_string)dev->acpi_path, &lid_handle);
++ if (status)
++ return -EFAULT;
++
++ status = acpi_setup_gpe_for_wake(lid_handle, NULL, dev->gpe_number);
++ if (status)
++ return -EFAULT;
++
++ status = acpi_enable_gpe(NULL, dev->gpe_number);
++ if (status)
++ return -EFAULT;
++
++ status = sid_lid_enable_wakeup(dev, false);
++ if (status) {
++ acpi_disable_gpe(NULL, dev->gpe_number);
++ return status;
++ }
++
++ platform_set_drvdata(pdev, dev);
++ return 0;
++}
++
++static int surface_sam_sid_gpelid_remove(struct platform_device *pdev)
++{
++ struct sid_lid_device *dev = platform_get_drvdata(pdev);
++
++ /* restore default behavior without this module */
++ sid_lid_enable_wakeup(dev, false);
++ acpi_disable_gpe(NULL, dev->gpe_number);
++
++ platform_set_drvdata(pdev, NULL);
++ return 0;
++}
++
++static struct platform_driver surface_sam_sid_gpelid = {
++ .probe = surface_sam_sid_gpelid_probe,
++ .remove = surface_sam_sid_gpelid_remove,
++ .driver = {
++ .name = "surface_sam_sid_gpelid",
++ .pm = &surface_sam_sid_gpelid_pm,
++ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
++ },
++};
++module_platform_driver(surface_sam_sid_gpelid);
++
++MODULE_AUTHOR("Maximilian Luz ");
++MODULE_DESCRIPTION("Surface Lid Driver for 5th Generation Surface Devices");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:surface_sam_sid_gpelid");
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
+new file mode 100644
+index 0000000000000..e0b1e42c2087f
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c
+@@ -0,0 +1,214 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Surface Performance Mode Driver.
++ * Allows changing the cooling capabilities based on user preference. 
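++ * The mode is exposed as a 'perf_mode' sysfs device attribute accepting
++ * values 1 (normal) through 4 (second performance mode), e.g. (exact
++ * path may vary):
++ *   echo 3 > /sys/bus/platform/devices/surface_sam_sid_perfmode/perf_mode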
++ */
++
++#include 
++#include 
++#include 
++#include 
++
++#include "surface_sam_ssh.h"
++
++
++#define SID_PARAM_PERM 0644
++
++enum sam_perf_mode {
++ SAM_PERF_MODE_NORMAL = 1,
++ SAM_PERF_MODE_BATTERY = 2,
++ SAM_PERF_MODE_PERF1 = 3,
++ SAM_PERF_MODE_PERF2 = 4,
++
++ __SAM_PERF_MODE__START = 1,
++ __SAM_PERF_MODE__END = 4,
++};
++
++enum sid_param_perf_mode {
++ SID_PARAM_PERF_MODE_AS_IS = 0,
++ SID_PARAM_PERF_MODE_NORMAL = SAM_PERF_MODE_NORMAL,
++ SID_PARAM_PERF_MODE_BATTERY = SAM_PERF_MODE_BATTERY,
++ SID_PARAM_PERF_MODE_PERF1 = SAM_PERF_MODE_PERF1,
++ SID_PARAM_PERF_MODE_PERF2 = SAM_PERF_MODE_PERF2,
++
++ __SID_PARAM_PERF_MODE__START = 0,
++ __SID_PARAM_PERF_MODE__END = 4,
++};
++
++struct spm_data {
++ struct ssam_controller *ctrl;
++};
++
++
++struct ssam_perf_info {
++ __le32 mode;
++ __le16 unknown1;
++ __le16 unknown2;
++} __packed;
++
++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
++ .target_category = SSAM_SSH_TC_TMP,
++ .command_id = 0x02,
++ .instance_id = 0x00,
++ .channel = 0x01,
++});
++
++static SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
++ .target_category = SSAM_SSH_TC_TMP,
++ .command_id = 0x03,
++ .instance_id = 0x00,
++ .channel = 0x01,
++});
++
++static int ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, u32 mode)
++{
++ __le32 mode_le = cpu_to_le32(mode);
++
++ if (mode < __SAM_PERF_MODE__START || mode > __SAM_PERF_MODE__END)
++ return -EINVAL;
++
++ return __ssam_tmp_perf_mode_set(ctrl, &mode_le);
++}
++
++
++static int param_perf_mode_set(const char *val, const struct kernel_param *kp)
++{
++ int perf_mode;
++ int status;
++
++ status = kstrtoint(val, 0, &perf_mode);
++ if (status)
++ return status;
++
++ if (perf_mode < __SID_PARAM_PERF_MODE__START || perf_mode > __SID_PARAM_PERF_MODE__END)
++ return -EINVAL;
++
++ return param_set_int(val, kp);
++}
++
++static const struct kernel_param_ops param_perf_mode_ops = {
++ .set = param_perf_mode_set,
++ .get = param_get_int,
++};
++
++static int param_perf_mode_init = SID_PARAM_PERF_MODE_AS_IS;
++static int param_perf_mode_exit = SID_PARAM_PERF_MODE_AS_IS;
++
++module_param_cb(perf_mode_init, &param_perf_mode_ops, &param_perf_mode_init, SID_PARAM_PERM);
++module_param_cb(perf_mode_exit, &param_perf_mode_ops, &param_perf_mode_exit, SID_PARAM_PERM);
++
++MODULE_PARM_DESC(perf_mode_init, "Performance-mode to be set on module initialization");
++MODULE_PARM_DESC(perf_mode_exit, "Performance-mode to be set on module exit");
++
++
++static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr, char *data)
++{
++ struct spm_data *d = dev_get_drvdata(dev);
++ struct ssam_perf_info info;
++ int status;
++
++ status = ssam_tmp_perf_mode_get(d->ctrl, &info);
++ if (status) {
++ dev_err(dev, "failed to get current performance mode: %d\n", status);
++ return -EIO;
++ }
++
++ return sprintf(data, "%d\n", le32_to_cpu(info.mode));
++}
++
++static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
++ const char *data, size_t count)
++{
++ struct spm_data *d = dev_get_drvdata(dev);
++ int perf_mode;
++ int status;
++
++ status = kstrtoint(data, 0, &perf_mode);
++ if (status)
++ return status;
++
++ status = ssam_tmp_perf_mode_set(d->ctrl, perf_mode);
++ if (status)
++ return status;
++
++ // TODO: Should we notify ACPI here? 
++	//
++	//       There is a _DSM call described as
++	//       WSID._DSM: Notify DPTF on Slider State change
++	//       which calls
++	//           ODV3 = ToInteger (Arg3)
++	//           Notify(IETM, 0x88)
++	//       IETM is an INT3400 Intel Dynamic Power Performance Management
++	//       device, part of the DPTF framework. From the corresponding
++	//       kernel driver, it looks like event 0x88 is being ignored. Also
++	//       it is currently unknown what the consequences of setting ODV3
++	//       are.
++
++	return count;
++}
++
++static const DEVICE_ATTR_RW(perf_mode);
++
++
++static int surface_sam_sid_perfmode_probe(struct platform_device *pdev)
++{
++	struct ssam_controller *ctrl;
++	struct spm_data *data;
++	int status;
++
++	// link to ec
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	data = devm_kzalloc(&pdev->dev, sizeof(struct spm_data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	data->ctrl = ctrl;
++	platform_set_drvdata(pdev, data);
++
++	// set initial perf_mode
++	if (param_perf_mode_init != SID_PARAM_PERF_MODE_AS_IS) {
++		status = ssam_tmp_perf_mode_set(ctrl, param_perf_mode_init);
++		if (status)
++			return status;
++	}
++
++	// register perf_mode attribute
++	status = sysfs_create_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
++	if (status)
++		goto err_sysfs;
++
++	return 0;
++
++err_sysfs:
++	ssam_tmp_perf_mode_set(ctrl, param_perf_mode_exit);
++	return status;
++}
++
++static int surface_sam_sid_perfmode_remove(struct platform_device *pdev)
++{
++	struct spm_data *data = platform_get_drvdata(pdev);
++
++	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_perf_mode.attr);
++	ssam_tmp_perf_mode_set(data->ctrl, param_perf_mode_exit);
++
++	platform_set_drvdata(pdev, NULL);
++	return 0;
++}
++
++static struct platform_driver surface_sam_sid_perfmode = {
++	.probe = surface_sam_sid_perfmode_probe,
++	.remove = surface_sam_sid_perfmode_remove,
++	.driver = {
++		.name = "surface_sam_sid_perfmode",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++module_platform_driver(surface_sam_sid_perfmode);
++
++MODULE_AUTHOR("Maximilian Luz ");
++MODULE_DESCRIPTION("Surface Performance Mode Driver for 5th Generation Surface Devices");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:surface_sam_sid_perfmode");
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.c b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
+new file mode 100644
+index 0000000000000..64a3d46a128cc
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c
+@@ -0,0 +1,1054 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Surface SID Battery/AC Driver.
++ * Provides support for the battery and AC on 7th generation Surface devices.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "surface_sam_ssh.h"
++#include "surface_sam_sid_power.h"
++
++#define SPWR_WARN	KERN_WARNING KBUILD_MODNAME ": "
++#define SPWR_DEBUG	KERN_DEBUG KBUILD_MODNAME ": "
++
++
++// TODO: check BIX/BST for unknown/unsupported 0xffffffff entries
++// TODO: DPTF (/SAN notifications)?
++// TODO: other properties?
++
++
++static unsigned int cache_time = 1000;
++module_param(cache_time, uint, 0644);
++MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
++
++#define SPWR_AC_BAT_UPDATE_DELAY	msecs_to_jiffies(5000)
++
++
++/*
++ * SAM Interface.
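++ *
++ * All battery and adapter state is queried via SAM requests on the battery
++ * target category (SSAM_SSH_TC_BAT). The requests defined below largely
++ * mirror the corresponding ACPI control methods (_STA, _BIX, _BST, _BTP)
++ * and DPTF attributes (PSRC, PMAX, ARTG, ...), with state changes being
++ * delivered as events (CIDs 0x15 to 0x17, see below).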
++ */
++
++#define SAM_EVENT_PWR_CID_BIX		0x15
++#define SAM_EVENT_PWR_CID_BST		0x16
++#define SAM_EVENT_PWR_CID_ADAPTER	0x17
++
++#define SAM_BATTERY_STA_OK		0x0f
++#define SAM_BATTERY_STA_PRESENT		0x10
++
++#define SAM_BATTERY_STATE_DISCHARGING	0x01
++#define SAM_BATTERY_STATE_CHARGING	0x02
++#define SAM_BATTERY_STATE_CRITICAL	0x04
++
++#define SAM_BATTERY_POWER_UNIT_MA	1
++
++
++/* Equivalent to data returned in ACPI _BIX method */
++struct spwr_bix {
++	u8 revision;
++	__le32 power_unit;
++	__le32 design_cap;
++	__le32 last_full_charge_cap;
++	__le32 technology;
++	__le32 design_voltage;
++	__le32 design_cap_warn;
++	__le32 design_cap_low;
++	__le32 cycle_count;
++	__le32 measurement_accuracy;
++	__le32 max_sampling_time;
++	__le32 min_sampling_time;
++	__le32 max_avg_interval;
++	__le32 min_avg_interval;
++	__le32 bat_cap_granularity_1;
++	__le32 bat_cap_granularity_2;
++	u8 model[21];
++	u8 serial[11];
++	u8 type[5];
++	u8 oem_info[21];
++} __packed;
++
++/* Equivalent to data returned in ACPI _BST method */
++struct spwr_bst {
++	__le32 state;
++	__le32 present_rate;
++	__le32 remaining_cap;
++	__le32 present_voltage;
++} __packed;
++
++/* DPTF event payload */
++struct spwr_event_dptf {
++	__le32 pmax;
++	__le32 _1;	/* currently unknown */
++	__le32 _2;	/* currently unknown */
++} __packed;
++
++
++/* Get battery status (_STA) */
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_sta, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x01,
++});
++
++/* Get battery static information (_BIX) */
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bix, struct spwr_bix, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x02,
++});
++
++/* Get battery dynamic information (_BST) */
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_bst, struct spwr_bst, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x03,
++});
++
++/* Set battery trip point (_BTP) */
++static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_btp, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x04,
++});
++
++/* Get platform power source for battery (DPTF PSRC) */
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psrc, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x0d,
++});
++
++/* Get maximum platform power for battery (DPTF PMAX) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_pmax, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x0b,
++});
++
++/* Get adapter rating (DPTF ARTG) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_artg, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x0f,
++});
++
++/* Unknown (DPTF PSOC) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_MD_R(ssam_bat_get_psoc, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x0c,
++});
++
++/* Unknown (DPTF CHGI / INT3403 SPPC) */
++__always_unused
++static SSAM_DEFINE_SYNC_REQUEST_MD_W(ssam_bat_set_chgi, __le32, {
++	.target_category = SSAM_SSH_TC_BAT,
++	.command_id = 0x0e,
++});
++
++
++/*
++ * Common Power-Subsystem Interface.
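++ *
++ * Both the battery and the AC device follow the same pattern: EC state is
++ * cached in the device struct under a mutex, exposed through a registered
++ * power_supply instance, and refreshed via SSAM event notifications. For
++ * the battery, cached dynamic state (_BST) is considered valid for at most
++ * cache_time milliseconds (see the module parameter above).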
++ */ ++ ++struct spwr_battery_device { ++ struct platform_device *pdev; ++ struct ssam_controller *ctrl; ++ const struct ssam_battery_properties *p; ++ ++ char name[32]; ++ struct power_supply *psy; ++ struct power_supply_desc psy_desc; ++ ++ struct delayed_work update_work; ++ ++ struct ssam_event_notifier notif; ++ ++ struct mutex lock; ++ unsigned long timestamp; ++ ++ __le32 sta; ++ struct spwr_bix bix; ++ struct spwr_bst bst; ++ u32 alarm; ++}; ++ ++struct spwr_ac_device { ++ struct platform_device *pdev; ++ struct ssam_controller *ctrl; ++ ++ char name[32]; ++ struct power_supply *psy; ++ struct power_supply_desc psy_desc; ++ ++ struct ssam_event_notifier notif; ++ ++ struct mutex lock; ++ ++ __le32 state; ++}; ++ ++static enum power_supply_property spwr_ac_props[] = { ++ POWER_SUPPLY_PROP_ONLINE, ++}; ++ ++static enum power_supply_property spwr_battery_props_chg[] = { ++ POWER_SUPPLY_PROP_STATUS, ++ POWER_SUPPLY_PROP_PRESENT, ++ POWER_SUPPLY_PROP_TECHNOLOGY, ++ POWER_SUPPLY_PROP_CYCLE_COUNT, ++ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, ++ POWER_SUPPLY_PROP_VOLTAGE_NOW, ++ POWER_SUPPLY_PROP_CURRENT_NOW, ++ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, ++ POWER_SUPPLY_PROP_CHARGE_FULL, ++ POWER_SUPPLY_PROP_CHARGE_NOW, ++ POWER_SUPPLY_PROP_CAPACITY, ++ POWER_SUPPLY_PROP_CAPACITY_LEVEL, ++ POWER_SUPPLY_PROP_MODEL_NAME, ++ POWER_SUPPLY_PROP_MANUFACTURER, ++ POWER_SUPPLY_PROP_SERIAL_NUMBER, ++}; ++ ++static enum power_supply_property spwr_battery_props_eng[] = { ++ POWER_SUPPLY_PROP_STATUS, ++ POWER_SUPPLY_PROP_PRESENT, ++ POWER_SUPPLY_PROP_TECHNOLOGY, ++ POWER_SUPPLY_PROP_CYCLE_COUNT, ++ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, ++ POWER_SUPPLY_PROP_VOLTAGE_NOW, ++ POWER_SUPPLY_PROP_POWER_NOW, ++ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, ++ POWER_SUPPLY_PROP_ENERGY_FULL, ++ POWER_SUPPLY_PROP_ENERGY_NOW, ++ POWER_SUPPLY_PROP_CAPACITY, ++ POWER_SUPPLY_PROP_CAPACITY_LEVEL, ++ POWER_SUPPLY_PROP_MODEL_NAME, ++ POWER_SUPPLY_PROP_MANUFACTURER, ++ POWER_SUPPLY_PROP_SERIAL_NUMBER, ++}; ++ ++ ++static int spwr_battery_register(struct spwr_battery_device *bat, ++ struct platform_device *pdev, ++ struct ssam_controller *ctrl, ++ const struct ssam_battery_properties *p); ++ ++static void spwr_battery_unregister(struct spwr_battery_device *bat); ++ ++ ++static inline bool spwr_battery_present(struct spwr_battery_device *bat) ++{ ++ return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT; ++} ++ ++ ++static inline int spwr_battery_load_sta(struct spwr_battery_device *bat) ++{ ++ return ssam_bat_get_sta(bat->ctrl, bat->p->channel, bat->p->instance, ++ &bat->sta); ++} ++ ++static inline int spwr_battery_load_bix(struct spwr_battery_device *bat) ++{ ++ if (!spwr_battery_present(bat)) ++ return 0; ++ ++ return ssam_bat_get_bix(bat->ctrl, bat->p->channel, bat->p->instance, ++ &bat->bix); ++} ++ ++static inline int spwr_battery_load_bst(struct spwr_battery_device *bat) ++{ ++ if (!spwr_battery_present(bat)) ++ return 0; ++ ++ return ssam_bat_get_bst(bat->ctrl, bat->p->channel, bat->p->instance, ++ &bat->bst); ++} ++ ++ ++static inline int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value) ++{ ++ __le32 alarm = cpu_to_le32(value); ++ ++ bat->alarm = value; ++ return ssam_bat_set_btp(bat->ctrl, bat->p->channel, bat->p->instance, ++ &alarm); ++} ++ ++static inline int spwr_battery_set_alarm(struct spwr_battery_device *bat, u32 value) ++{ ++ int status; ++ ++ mutex_lock(&bat->lock); ++ status = spwr_battery_set_alarm_unlocked(bat, value); ++ mutex_unlock(&bat->lock); ++ ++ return status; ++} ++ ++static 
inline int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat, bool cached) ++{ ++ unsigned long cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time); ++ int status; ++ ++ if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline)) ++ return 0; ++ ++ status = spwr_battery_load_sta(bat); ++ if (status) ++ return status; ++ ++ status = spwr_battery_load_bst(bat); ++ if (status) ++ return status; ++ ++ bat->timestamp = jiffies; ++ return 0; ++} ++ ++static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached) ++{ ++ int status; ++ ++ mutex_lock(&bat->lock); ++ status = spwr_battery_update_bst_unlocked(bat, cached); ++ mutex_unlock(&bat->lock); ++ ++ return status; ++} ++ ++static inline int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat) ++{ ++ int status; ++ ++ status = spwr_battery_load_sta(bat); ++ if (status) ++ return status; ++ ++ status = spwr_battery_load_bix(bat); ++ if (status) ++ return status; ++ ++ status = spwr_battery_load_bst(bat); ++ if (status) ++ return status; ++ ++ bat->timestamp = jiffies; ++ return 0; ++} ++ ++static int spwr_battery_update_bix(struct spwr_battery_device *bat) ++{ ++ int status; ++ ++ mutex_lock(&bat->lock); ++ status = spwr_battery_update_bix_unlocked(bat); ++ mutex_unlock(&bat->lock); ++ ++ return status; ++} ++ ++static inline int spwr_ac_update_unlocked(struct spwr_ac_device *ac) ++{ ++ return ssam_bat_get_psrc(ac->ctrl, 0x01, 0x01, &ac->state); ++} ++ ++static int spwr_ac_update(struct spwr_ac_device *ac) ++{ ++ int status; ++ ++ mutex_lock(&ac->lock); ++ status = spwr_ac_update_unlocked(ac); ++ mutex_unlock(&ac->lock); ++ ++ return status; ++} ++ ++ ++static int spwr_battery_recheck(struct spwr_battery_device *bat) ++{ ++ bool present = spwr_battery_present(bat); ++ u32 unit = get_unaligned_le32(&bat->bix.power_unit); ++ int status; ++ ++ status = spwr_battery_update_bix(bat); ++ if (status) ++ return status; ++ ++ // if battery has been attached, (re-)initialize alarm ++ if (!present && spwr_battery_present(bat)) { ++ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn); ++ status = spwr_battery_set_alarm(bat, cap_warn); ++ if (status) ++ return status; ++ } ++ ++ // if the unit has changed, re-add the battery ++ if (unit != get_unaligned_le32(&bat->bix.power_unit)) { ++ spwr_battery_unregister(bat); ++ status = spwr_battery_register(bat, bat->pdev, bat->ctrl, bat->p); ++ } ++ ++ return status; ++} ++ ++ ++static inline int spwr_notify_bix(struct spwr_battery_device *bat) ++{ ++ int status; ++ ++ status = spwr_battery_recheck(bat); ++ if (!status) ++ power_supply_changed(bat->psy); ++ ++ return status; ++} ++ ++static inline int spwr_notify_bst(struct spwr_battery_device *bat) ++{ ++ int status; ++ ++ status = spwr_battery_update_bst(bat, false); ++ if (!status) ++ power_supply_changed(bat->psy); ++ ++ return status; ++} ++ ++static inline int spwr_notify_adapter_bat(struct spwr_battery_device *bat) ++{ ++ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap); ++ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap); ++ ++ /* ++ * Handle battery update quirk: ++ * When the battery is fully charged and the adapter is plugged in or ++ * removed, the EC does not send a separate event for the state ++ * (charging/discharging) change. Furthermore it may take some time until ++ * the state is updated on the battery. Schedule an update to solve this. 
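++	 * The delay used for this deferred update is given by
++	 * SPWR_AC_BAT_UPDATE_DELAY (five seconds, see above).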
++ */ ++ ++ if (remaining_cap >= last_full_cap) ++ schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY); ++ ++ return 0; ++} ++ ++static inline int spwr_notify_adapter_ac(struct spwr_ac_device *ac) ++{ ++ int status; ++ ++ status = spwr_ac_update(ac); ++ if (!status) ++ power_supply_changed(ac->psy); ++ ++ return status; ++} ++ ++static u32 spwr_notify_bat(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct spwr_battery_device *bat = container_of(nb, struct spwr_battery_device, notif.base); ++ int status; ++ ++ dev_dbg(&bat->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n", ++ event->command_id, event->instance_id, event->channel); ++ ++ // handled here, needs to be handled for all channels/instances ++ if (event->command_id == SAM_EVENT_PWR_CID_ADAPTER) { ++ status = spwr_notify_adapter_bat(bat); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++ } ++ ++ // check for the correct channel and instance ID ++ if (event->channel != bat->p->channel) ++ return 0; ++ ++ if (event->instance_id != bat->p->instance) ++ return 0; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_PWR_CID_BIX: ++ status = spwr_notify_bix(bat); ++ break; ++ ++ case SAM_EVENT_PWR_CID_BST: ++ status = spwr_notify_bst(bat); ++ break; ++ ++ default: ++ return 0; ++ } ++ ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++} ++ ++static u32 spwr_notify_ac(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct spwr_ac_device *ac = container_of(nb, struct spwr_ac_device, notif.base); ++ int status; ++ ++ dev_dbg(&ac->pdev->dev, "power event (cid = 0x%02x, iid = %d, chn = %d)\n", ++ event->command_id, event->instance_id, event->channel); ++ ++ // AC has IID = 0 ++ if (event->instance_id != 0) ++ return 0; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_PWR_CID_ADAPTER: ++ status = spwr_notify_adapter_ac(ac); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++ ++ default: ++ return 0; ++ } ++} ++ ++static void spwr_battery_update_bst_workfn(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct spwr_battery_device *bat = container_of(dwork, struct spwr_battery_device, update_work); ++ int status; ++ ++ status = spwr_battery_update_bst(bat, false); ++ if (!status) ++ power_supply_changed(bat->psy); ++ ++ if (status) ++ dev_err(&bat->pdev->dev, "failed to update battery state: %d\n", status); ++} ++ ++ ++static inline int spwr_battery_prop_status(struct spwr_battery_device *bat) ++{ ++ u32 state = get_unaligned_le32(&bat->bst.state); ++ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap); ++ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap); ++ u32 present_rate = get_unaligned_le32(&bat->bst.present_rate); ++ ++ if (state & SAM_BATTERY_STATE_DISCHARGING) ++ return POWER_SUPPLY_STATUS_DISCHARGING; ++ ++ if (state & SAM_BATTERY_STATE_CHARGING) ++ return POWER_SUPPLY_STATUS_CHARGING; ++ ++ if (last_full_cap == remaining_cap) ++ return POWER_SUPPLY_STATUS_FULL; ++ ++ if (present_rate == 0) ++ return POWER_SUPPLY_STATUS_NOT_CHARGING; ++ ++ return POWER_SUPPLY_STATUS_UNKNOWN; ++} ++ ++static inline int spwr_battery_prop_technology(struct spwr_battery_device *bat) ++{ ++ if (!strcasecmp("NiCd", bat->bix.type)) ++ return POWER_SUPPLY_TECHNOLOGY_NiCd; ++ ++ if (!strcasecmp("NiMH", bat->bix.type)) ++ return POWER_SUPPLY_TECHNOLOGY_NiMH; ++ ++ if (!strcasecmp("LION", bat->bix.type)) ++ return POWER_SUPPLY_TECHNOLOGY_LION; ++ ++ if 
(!strncasecmp("LI-ION", bat->bix.type, 6)) ++ return POWER_SUPPLY_TECHNOLOGY_LION; ++ ++ if (!strcasecmp("LiP", bat->bix.type)) ++ return POWER_SUPPLY_TECHNOLOGY_LIPO; ++ ++ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN; ++} ++ ++static inline int spwr_battery_prop_capacity(struct spwr_battery_device *bat) ++{ ++ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap); ++ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap); ++ ++ if (remaining_cap && last_full_cap) ++ return remaining_cap * 100 / last_full_cap; ++ else ++ return 0; ++} ++ ++static inline int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat) ++{ ++ u32 state = get_unaligned_le32(&bat->bst.state); ++ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap); ++ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap); ++ ++ if (state & SAM_BATTERY_STATE_CRITICAL) ++ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; ++ ++ if (remaining_cap >= last_full_cap) ++ return POWER_SUPPLY_CAPACITY_LEVEL_FULL; ++ ++ if (remaining_cap <= bat->alarm) ++ return POWER_SUPPLY_CAPACITY_LEVEL_LOW; ++ ++ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; ++} ++ ++static int spwr_ac_get_property(struct power_supply *psy, ++ enum power_supply_property psp, ++ union power_supply_propval *val) ++{ ++ struct spwr_ac_device *ac = power_supply_get_drvdata(psy); ++ int status; ++ ++ mutex_lock(&ac->lock); ++ ++ status = spwr_ac_update_unlocked(ac); ++ if (status) ++ goto out; ++ ++ switch (psp) { ++ case POWER_SUPPLY_PROP_ONLINE: ++ val->intval = le32_to_cpu(ac->state) == 1; ++ break; ++ ++ default: ++ status = -EINVAL; ++ goto out; ++ } ++ ++out: ++ mutex_unlock(&ac->lock); ++ return status; ++} ++ ++static int spwr_battery_get_property(struct power_supply *psy, ++ enum power_supply_property psp, ++ union power_supply_propval *val) ++{ ++ struct spwr_battery_device *bat = power_supply_get_drvdata(psy); ++ int status; ++ ++ mutex_lock(&bat->lock); ++ ++ status = spwr_battery_update_bst_unlocked(bat, true); ++ if (status) ++ goto out; ++ ++ // abort if battery is not present ++ if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) { ++ status = -ENODEV; ++ goto out; ++ } ++ ++ switch (psp) { ++ case POWER_SUPPLY_PROP_STATUS: ++ val->intval = spwr_battery_prop_status(bat); ++ break; ++ ++ case POWER_SUPPLY_PROP_PRESENT: ++ val->intval = spwr_battery_present(bat); ++ break; ++ ++ case POWER_SUPPLY_PROP_TECHNOLOGY: ++ val->intval = spwr_battery_prop_technology(bat); ++ break; ++ ++ case POWER_SUPPLY_PROP_CYCLE_COUNT: ++ val->intval = get_unaligned_le32(&bat->bix.cycle_count); ++ break; ++ ++ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: ++ val->intval = get_unaligned_le32(&bat->bix.design_voltage) ++ * 1000; ++ break; ++ ++ case POWER_SUPPLY_PROP_VOLTAGE_NOW: ++ val->intval = get_unaligned_le32(&bat->bst.present_voltage) ++ * 1000; ++ break; ++ ++ case POWER_SUPPLY_PROP_CURRENT_NOW: ++ case POWER_SUPPLY_PROP_POWER_NOW: ++ val->intval = get_unaligned_le32(&bat->bst.present_rate) * 1000; ++ break; ++ ++ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ++ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: ++ val->intval = get_unaligned_le32(&bat->bix.design_cap) * 1000; ++ break; ++ ++ case POWER_SUPPLY_PROP_CHARGE_FULL: ++ case POWER_SUPPLY_PROP_ENERGY_FULL: ++ val->intval = get_unaligned_le32(&bat->bix.last_full_charge_cap) ++ * 1000; ++ break; ++ ++ case POWER_SUPPLY_PROP_CHARGE_NOW: ++ case POWER_SUPPLY_PROP_ENERGY_NOW: ++ val->intval = get_unaligned_le32(&bat->bst.remaining_cap) ++ * 1000; ++ break; ++ ++ case 
POWER_SUPPLY_PROP_CAPACITY: ++ val->intval = spwr_battery_prop_capacity(bat); ++ break; ++ ++ case POWER_SUPPLY_PROP_CAPACITY_LEVEL: ++ val->intval = spwr_battery_prop_capacity_level(bat); ++ break; ++ ++ case POWER_SUPPLY_PROP_MODEL_NAME: ++ val->strval = bat->bix.model; ++ break; ++ ++ case POWER_SUPPLY_PROP_MANUFACTURER: ++ val->strval = bat->bix.oem_info; ++ break; ++ ++ case POWER_SUPPLY_PROP_SERIAL_NUMBER: ++ val->strval = bat->bix.serial; ++ break; ++ ++ default: ++ status = -EINVAL; ++ goto out; ++ } ++ ++out: ++ mutex_unlock(&bat->lock); ++ return status; ++} ++ ++ ++static ssize_t spwr_battery_alarm_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct power_supply *psy = dev_get_drvdata(dev); ++ struct spwr_battery_device *bat = power_supply_get_drvdata(psy); ++ ++ return sprintf(buf, "%d\n", bat->alarm * 1000); ++} ++ ++static ssize_t spwr_battery_alarm_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct power_supply *psy = dev_get_drvdata(dev); ++ struct spwr_battery_device *bat = power_supply_get_drvdata(psy); ++ unsigned long value; ++ int status; ++ ++ status = kstrtoul(buf, 0, &value); ++ if (status) ++ return status; ++ ++ if (!spwr_battery_present(bat)) ++ return -ENODEV; ++ ++ status = spwr_battery_set_alarm(bat, value / 1000); ++ if (status) ++ return status; ++ ++ return count; ++} ++ ++static const struct device_attribute alarm_attr = { ++ .attr = {.name = "alarm", .mode = 0644}, ++ .show = spwr_battery_alarm_show, ++ .store = spwr_battery_alarm_store, ++}; ++ ++ ++static int spwr_ac_register(struct spwr_ac_device *ac, ++ struct platform_device *pdev, ++ struct ssam_controller *ctrl) ++{ ++ struct power_supply_config psy_cfg = {}; ++ __le32 sta; ++ int status; ++ ++ // make sure the device is there and functioning properly ++ status = ssam_bat_get_sta(ctrl, 0x01, 0x01, &sta); ++ if (status) ++ return status; ++ ++ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK) ++ return -ENODEV; ++ ++ psy_cfg.drv_data = ac; ++ ++ ac->pdev = pdev; ++ ac->ctrl = ctrl; ++ mutex_init(&ac->lock); ++ ++ snprintf(ac->name, ARRAY_SIZE(ac->name), "ADP0"); ++ ++ ac->psy_desc.name = ac->name; ++ ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS; ++ ac->psy_desc.properties = spwr_ac_props; ++ ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props); ++ ac->psy_desc.get_property = spwr_ac_get_property; ++ ++ ac->psy = power_supply_register(&ac->pdev->dev, &ac->psy_desc, &psy_cfg); ++ if (IS_ERR(ac->psy)) { ++ status = PTR_ERR(ac->psy); ++ goto err_psy; ++ } ++ ++ ac->notif.base.priority = 1; ++ ac->notif.base.fn = spwr_notify_ac; ++ ac->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ ac->notif.event.id.target_category = SSAM_SSH_TC_BAT; ++ ac->notif.event.id.instance = 0; ++ ac->notif.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = ssam_notifier_register(ctrl, &ac->notif); ++ if (status) ++ goto err_notif; ++ ++ return 0; ++ ++err_notif: ++ power_supply_unregister(ac->psy); ++err_psy: ++ mutex_destroy(&ac->lock); ++ return status; ++} ++ ++static int spwr_ac_unregister(struct spwr_ac_device *ac) ++{ ++ ssam_notifier_unregister(ac->ctrl, &ac->notif); ++ power_supply_unregister(ac->psy); ++ mutex_destroy(&ac->lock); ++ return 0; ++} ++ ++static int spwr_battery_register(struct spwr_battery_device *bat, ++ struct platform_device *pdev, ++ struct ssam_controller *ctrl, ++ const struct ssam_battery_properties *p) ++{ ++ struct power_supply_config psy_cfg = {}; ++ __le32 sta; ++ int status; ++ ++ 
bat->pdev = pdev; ++ bat->ctrl = ctrl; ++ bat->p = p; ++ ++ // make sure the device is there and functioning properly ++ status = ssam_bat_get_sta(ctrl, bat->p->channel, bat->p->instance, &sta); ++ if (status) ++ return status; ++ ++ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK) ++ return -ENODEV; ++ ++ status = spwr_battery_update_bix_unlocked(bat); ++ if (status) ++ return status; ++ ++ if (spwr_battery_present(bat)) { ++ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn); ++ status = spwr_battery_set_alarm_unlocked(bat, cap_warn); ++ if (status) ++ return status; ++ } ++ ++ snprintf(bat->name, ARRAY_SIZE(bat->name), "BAT%d", bat->p->num); ++ bat->psy_desc.name = bat->name; ++ bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY; ++ ++ if (get_unaligned_le32(&bat->bix.power_unit) == SAM_BATTERY_POWER_UNIT_MA) { ++ bat->psy_desc.properties = spwr_battery_props_chg; ++ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg); ++ } else { ++ bat->psy_desc.properties = spwr_battery_props_eng; ++ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng); ++ } ++ ++ bat->psy_desc.get_property = spwr_battery_get_property; ++ ++ mutex_init(&bat->lock); ++ psy_cfg.drv_data = bat; ++ ++ INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn); ++ ++ bat->psy = power_supply_register(&bat->pdev->dev, &bat->psy_desc, &psy_cfg); ++ if (IS_ERR(bat->psy)) { ++ status = PTR_ERR(bat->psy); ++ goto err_psy; ++ } ++ ++ bat->notif.base.priority = 1; ++ bat->notif.base.fn = spwr_notify_bat; ++ bat->notif.event.reg = p->registry; ++ bat->notif.event.id.target_category = SSAM_SSH_TC_BAT; ++ bat->notif.event.id.instance = 0; ++ bat->notif.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = ssam_notifier_register(ctrl, &bat->notif); ++ if (status) ++ goto err_notif; ++ ++ status = device_create_file(&bat->psy->dev, &alarm_attr); ++ if (status) ++ goto err_file; ++ ++ return 0; ++ ++err_file: ++ ssam_notifier_unregister(ctrl, &bat->notif); ++err_notif: ++ power_supply_unregister(bat->psy); ++err_psy: ++ mutex_destroy(&bat->lock); ++ return status; ++} ++ ++static void spwr_battery_unregister(struct spwr_battery_device *bat) ++{ ++ ssam_notifier_unregister(bat->ctrl, &bat->notif); ++ cancel_delayed_work_sync(&bat->update_work); ++ device_remove_file(&bat->psy->dev, &alarm_attr); ++ power_supply_unregister(bat->psy); ++ mutex_destroy(&bat->lock); ++} ++ ++ ++/* ++ * Battery Driver. ++ */ ++ ++#ifdef CONFIG_PM_SLEEP ++static int surface_sam_sid_battery_resume(struct device *dev) ++{ ++ struct spwr_battery_device *bat; ++ ++ bat = dev_get_drvdata(dev); ++ return spwr_battery_recheck(bat); ++} ++#else ++#define surface_sam_sid_battery_resume NULL ++#endif ++ ++SIMPLE_DEV_PM_OPS(surface_sam_sid_battery_pm, NULL, surface_sam_sid_battery_resume); ++ ++static int surface_sam_sid_battery_probe(struct platform_device *pdev) ++{ ++ struct spwr_battery_device *bat; ++ struct ssam_controller *ctrl; ++ int status; ++ ++ // link to ec ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) ++ return status == -ENXIO ? 
-EPROBE_DEFER : status;
++
++	bat = devm_kzalloc(&pdev->dev, sizeof(struct spwr_battery_device), GFP_KERNEL);
++	if (!bat)
++		return -ENOMEM;
++
++	platform_set_drvdata(pdev, bat);
++	return spwr_battery_register(bat, pdev, ctrl, pdev->dev.platform_data);
++}
++
++static int surface_sam_sid_battery_remove(struct platform_device *pdev)
++{
++	struct spwr_battery_device *bat;
++
++	bat = platform_get_drvdata(pdev);
++	spwr_battery_unregister(bat);
++
++	return 0;
++}
++
++static struct platform_driver surface_sam_sid_battery = {
++	.probe = surface_sam_sid_battery_probe,
++	.remove = surface_sam_sid_battery_remove,
++	.driver = {
++		.name = "surface_sam_sid_battery",
++		.pm = &surface_sam_sid_battery_pm,
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++/*
++ * AC Driver.
++ */
++
++static int surface_sam_sid_ac_probe(struct platform_device *pdev)
++{
++	struct spwr_ac_device *ac;
++	struct ssam_controller *ctrl;
++	int status;
++
++	// link to ec
++	status = ssam_client_bind(&pdev->dev, &ctrl);
++	if (status)
++		return status == -ENXIO ? -EPROBE_DEFER : status;
++
++	ac = devm_kzalloc(&pdev->dev, sizeof(struct spwr_ac_device), GFP_KERNEL);
++	if (!ac)
++		return -ENOMEM;
++
++	status = spwr_ac_register(ac, pdev, ctrl);
++	if (status)
++		return status;
++
++	platform_set_drvdata(pdev, ac);
++	return 0;
++}
++
++static int surface_sam_sid_ac_remove(struct platform_device *pdev)
++{
++	struct spwr_ac_device *ac;
++
++	ac = platform_get_drvdata(pdev);
++	return spwr_ac_unregister(ac);
++}
++
++static struct platform_driver surface_sam_sid_ac = {
++	.probe = surface_sam_sid_ac_probe,
++	.remove = surface_sam_sid_ac_remove,
++	.driver = {
++		.name = "surface_sam_sid_ac",
++		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	},
++};
++
++
++static int __init surface_sam_sid_power_init(void)
++{
++	int status;
++
++	status = platform_driver_register(&surface_sam_sid_battery);
++	if (status)
++		return status;
++
++	status = platform_driver_register(&surface_sam_sid_ac);
++	if (status) {
++		platform_driver_unregister(&surface_sam_sid_battery);
++		return status;
++	}
++
++	return 0;
++}
++
++static void __exit surface_sam_sid_power_exit(void)
++{
++	platform_driver_unregister(&surface_sam_sid_battery);
++	platform_driver_unregister(&surface_sam_sid_ac);
++}
++
++module_init(surface_sam_sid_power_init);
++module_exit(surface_sam_sid_power_exit);
++
++MODULE_AUTHOR("Maximilian Luz ");
++MODULE_DESCRIPTION("Surface Battery/AC Driver for 7th Generation Surface Devices");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:surface_sam_sid_ac");
++MODULE_ALIAS("platform:surface_sam_sid_battery");
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.h b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
+new file mode 100644
+index 0000000000000..d8d9509b7d122
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.h
+@@ -0,0 +1,16 @@
++
++#ifndef _SURFACE_SAM_SID_POWER_H
++#define _SURFACE_SAM_SID_POWER_H
++
++#include
++#include "surface_sam_ssh.h"
++
++
++struct ssam_battery_properties {
++	struct ssam_event_registry registry;
++	u8 num;
++	u8 channel;
++	u8 instance;
++};
++
++#endif /* _SURFACE_SAM_SID_POWER_H */
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
+new file mode 100644
+index 0000000000000..a6059d6796619
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c
+@@ -0,0 +1,429 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Microsoft
Surface HID (VHF) driver for HID input events via SAM. ++ * Used for keyboard input events on the 7th generation Surface Laptops. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_ssh.h" ++#include "surface_sam_sid_vhf.h" ++ ++#define SID_VHF_INPUT_NAME "Microsoft Surface HID" ++ ++#define SAM_EVENT_SID_VHF_TC 0x15 ++ ++#define VHF_HID_STARTED 0 ++ ++struct sid_vhf { ++ struct platform_device *dev; ++ struct ssam_controller *ctrl; ++ const struct ssam_hid_properties *p; ++ ++ struct ssam_event_notifier notif; ++ ++ struct hid_device *hid; ++ unsigned long state; ++}; ++ ++ ++static int sid_vhf_hid_start(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++ return 0; ++} ++ ++static void sid_vhf_hid_stop(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++} ++ ++static int sid_vhf_hid_open(struct hid_device *hid) ++{ ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); ++ ++ hid_dbg(hid, "%s\n", __func__); ++ ++ set_bit(VHF_HID_STARTED, &vhf->state); ++ return 0; ++} ++ ++static void sid_vhf_hid_close(struct hid_device *hid) ++{ ++ ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); ++ ++ hid_dbg(hid, "%s\n", __func__); ++ ++ clear_bit(VHF_HID_STARTED, &vhf->state); ++} ++ ++struct surface_sam_sid_vhf_meta_rqst { ++ u8 id; ++ u32 offset; ++ u32 length; // buffer limit on send, length of data received on receive ++ u8 end; // 0x01 if end was reached ++} __packed; ++ ++struct vhf_device_metadata_info { ++ u8 len; ++ u8 _2; ++ u8 _3; ++ u8 _4; ++ u8 _5; ++ u8 _6; ++ u8 _7; ++ u16 hid_len; // hid descriptor length ++} __packed; ++ ++struct vhf_device_metadata { ++ u32 len; ++ u16 vendor_id; ++ u16 product_id; ++ u8 _1[24]; ++} __packed; ++ ++union vhf_buffer_data { ++ struct vhf_device_metadata_info info; ++ u8 pld[0x76]; ++ struct vhf_device_metadata meta; ++}; ++ ++struct surface_sam_sid_vhf_meta_resp { ++ struct surface_sam_sid_vhf_meta_rqst rqst; ++ union vhf_buffer_data data; ++} __packed; ++ ++ ++static int vhf_get_metadata(struct ssam_controller *ctrl, u8 iid, ++ struct vhf_device_metadata *meta) ++{ ++ struct surface_sam_sid_vhf_meta_resp data = {}; ++ struct ssam_request rqst; ++ struct ssam_response rsp; ++ int status; ++ ++ data.rqst.id = 2; ++ data.rqst.offset = 0; ++ data.rqst.length = 0x76; ++ data.rqst.end = 0; ++ ++ rqst.target_category = SSAM_SSH_TC_HID;; ++ rqst.command_id = 0x04; ++ rqst.instance_id = iid; ++ rqst.channel = 0x02; ++ rqst.flags = SSAM_REQUEST_HAS_RESPONSE; ++ rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst); ++ rqst.payload = (u8 *)&data.rqst; ++ ++ rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp); ++ rsp.length = 0; ++ rsp.pointer = (u8 *)&data; ++ ++ status = ssam_request_sync(ctrl, &rqst, &rsp); ++ if (status) ++ return status; ++ ++ *meta = data.data.meta; ++ ++ return 0; ++} ++ ++static int vhf_get_hid_descriptor(struct hid_device *hid, u8 iid, u8 **desc, int *size) ++{ ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); ++ struct surface_sam_sid_vhf_meta_resp data = {}; ++ struct ssam_request rqst; ++ struct ssam_response rsp; ++ int status, len; ++ u8 *buf; ++ ++ data.rqst.id = 0; ++ data.rqst.offset = 0; ++ data.rqst.length = 0x76; ++ data.rqst.end = 0; ++ ++ rqst.target_category = SSAM_SSH_TC_HID; ++ rqst.command_id = 0x04; ++ rqst.instance_id = iid; ++ rqst.channel = 0x02; ++ rqst.flags = SSAM_REQUEST_HAS_RESPONSE; ++ rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst); ++ rqst.payload = (u8 *)&data.rqst; ++ ++ rsp.capacity = 
sizeof(struct surface_sam_sid_vhf_meta_resp); ++ rsp.length = 0; ++ rsp.pointer = (u8 *)&data; ++ ++ // first fetch 00 to get the total length ++ status = ssam_request_sync(vhf->ctrl, &rqst, &rsp); ++ if (status) ++ return status; ++ ++ len = data.data.info.hid_len; ++ ++ // allocate a buffer for the descriptor ++ buf = kzalloc(len, GFP_KERNEL); ++ ++ // then, iterate and write into buffer, copying out bytes ++ data.rqst.id = 1; ++ data.rqst.offset = 0; ++ data.rqst.length = 0x76; ++ data.rqst.end = 0; ++ ++ while (!data.rqst.end && data.rqst.offset < len) { ++ status = ssam_request_sync(vhf->ctrl, &rqst, &rsp); ++ if (status) { ++ kfree(buf); ++ return status; ++ } ++ memcpy(buf + data.rqst.offset, data.data.pld, data.rqst.length); ++ ++ data.rqst.offset += data.rqst.length; ++ } ++ ++ *desc = buf; ++ *size = len; ++ ++ return 0; ++} ++ ++static int sid_vhf_hid_parse(struct hid_device *hid) ++{ ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); ++ int ret = 0, size; ++ u8 *buf; ++ ++ ret = vhf_get_hid_descriptor(hid, vhf->p->instance, &buf, &size); ++ if (ret != 0) { ++ hid_err(hid, "Failed to read HID descriptor from device: %d\n", ret); ++ return -EIO; ++ } ++ hid_dbg(hid, "HID descriptor of device:"); ++ print_hex_dump_debug("descriptor:", DUMP_PREFIX_OFFSET, 16, 1, buf, size, false); ++ ++ ret = hid_parse_report(hid, buf, size); ++ kfree(buf); ++ return ret; ++ ++} ++ ++static int sid_vhf_hid_raw_request(struct hid_device *hid, unsigned char ++ reportnum, u8 *buf, size_t len, unsigned char rtype, int ++ reqtype) ++{ ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); ++ struct ssam_request rqst; ++ struct ssam_response rsp; ++ int status; ++ u8 cid; ++ ++ hid_dbg(hid, "%s: reportnum=%#04x rtype=%i reqtype=%i\n", __func__, reportnum, rtype, reqtype); ++ print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); ++ ++ // Byte 0 is the report number. Report data starts at byte 1. ++ buf[0] = reportnum; ++ ++ switch (rtype) { ++ case HID_OUTPUT_REPORT: ++ cid = 0x01; ++ break; ++ case HID_FEATURE_REPORT: ++ switch (reqtype) { ++ case HID_REQ_GET_REPORT: ++ // The EC doesn't respond to GET FEATURE for these touchpad reports ++ // we immediately discard to avoid waiting for a timeout. ++ if (reportnum == 6 || reportnum == 7 || reportnum == 8 || reportnum == 9 || reportnum == 0x0b) { ++ hid_dbg(hid, "%s: skipping get feature report for 0x%02x\n", __func__, reportnum); ++ return 0; ++ } ++ ++ cid = 0x02; ++ break; ++ case HID_REQ_SET_REPORT: ++ cid = 0x03; ++ break; ++ default: ++ hid_err(hid, "%s: unknown req type 0x%02x\n", __func__, rtype); ++ return -EIO; ++ } ++ break; ++ default: ++ hid_err(hid, "%s: unknown report type 0x%02x\n", __func__, reportnum); ++ return -EIO; ++ } ++ ++ rqst.target_category = SSAM_SSH_TC_HID; ++ rqst.channel = 0x02; ++ rqst.instance_id = vhf->p->instance; ++ rqst.command_id = cid; ++ rqst.flags = reqtype == HID_REQ_GET_REPORT ? SSAM_REQUEST_HAS_RESPONSE : 0; ++ rqst.length = reqtype == HID_REQ_GET_REPORT ? 
1 : len; ++ rqst.payload = buf; ++ ++ rsp.capacity = len; ++ rsp.length = 0; ++ rsp.pointer = buf; ++ ++ hid_dbg(hid, "%s: sending to cid=%#04x snc=%#04x\n", __func__, cid, HID_REQ_GET_REPORT == reqtype); ++ ++ status = ssam_request_sync(vhf->ctrl, &rqst, &rsp); ++ hid_dbg(hid, "%s: status %i\n", __func__, status); ++ ++ if (status) ++ return status; ++ ++ if (rsp.length > 0) ++ print_hex_dump_debug("response:", DUMP_PREFIX_OFFSET, 16, 1, rsp.pointer, rsp.length, false); ++ ++ return rsp.length; ++} ++ ++static struct hid_ll_driver sid_vhf_hid_ll_driver = { ++ .start = sid_vhf_hid_start, ++ .stop = sid_vhf_hid_stop, ++ .open = sid_vhf_hid_open, ++ .close = sid_vhf_hid_close, ++ .parse = sid_vhf_hid_parse, ++ .raw_request = sid_vhf_hid_raw_request, ++}; ++ ++ ++static struct hid_device *sid_vhf_create_hid_device(struct platform_device *pdev, struct vhf_device_metadata *meta) ++{ ++ struct hid_device *hid; ++ ++ hid = hid_allocate_device(); ++ if (IS_ERR(hid)) ++ return hid; ++ ++ hid->dev.parent = &pdev->dev; ++ ++ hid->bus = BUS_VIRTUAL; ++ hid->vendor = meta->vendor_id; ++ hid->product = meta->product_id; ++ ++ hid->ll_driver = &sid_vhf_hid_ll_driver; ++ ++ sprintf(hid->name, "%s", SID_VHF_INPUT_NAME); ++ ++ return hid; ++} ++ ++static u32 sid_vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct sid_vhf *vhf = container_of(nb, struct sid_vhf, notif.base); ++ int status; ++ ++ if (event->target_category != SSAM_SSH_TC_HID) ++ return 0; ++ ++ if (event->channel != 0x02) ++ return 0; ++ ++ if (event->instance_id != vhf->p->instance) ++ return 0; ++ ++ if (event->command_id != 0x00 && event->command_id != 0x03 && event->command_id != 0x04) ++ return 0; ++ ++ // skip if HID hasn't started yet ++ if (!test_bit(VHF_HID_STARTED, &vhf->state)) ++ return SSAM_NOTIF_HANDLED; ++ ++ status = hid_input_report(vhf->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++} ++ ++static int surface_sam_sid_vhf_probe(struct platform_device *pdev) ++{ ++ const struct ssam_hid_properties *p = pdev->dev.platform_data; ++ struct ssam_controller *ctrl; ++ struct sid_vhf *vhf; ++ struct vhf_device_metadata meta = {}; ++ struct hid_device *hid; ++ int status; ++ ++ // add device link to EC ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) ++ return status == -ENXIO ? 
-EPROBE_DEFER : status; ++ ++ vhf = kzalloc(sizeof(struct sid_vhf), GFP_KERNEL); ++ if (!vhf) ++ return -ENOMEM; ++ ++ status = vhf_get_metadata(ctrl, p->instance, &meta); ++ if (status) ++ goto err_create_hid; ++ ++ hid = sid_vhf_create_hid_device(pdev, &meta); ++ if (IS_ERR(hid)) { ++ status = PTR_ERR(hid); ++ goto err_create_hid; ++ } ++ ++ vhf->dev = pdev; ++ vhf->ctrl = ctrl; ++ vhf->p = pdev->dev.platform_data; ++ vhf->hid = hid; ++ ++ vhf->notif.base.priority = 1; ++ vhf->notif.base.fn = sid_vhf_event_handler; ++ vhf->notif.event.reg = p->registry; ++ vhf->notif.event.id.target_category = SSAM_SSH_TC_HID; ++ vhf->notif.event.id.instance = p->instance; ++ vhf->notif.event.flags = 0; ++ ++ platform_set_drvdata(pdev, vhf); ++ ++ status = ssam_notifier_register(ctrl, &vhf->notif); ++ if (status) ++ goto err_notif; ++ ++ status = hid_add_device(hid); ++ if (status) ++ goto err_add_hid; ++ ++ return 0; ++ ++err_add_hid: ++ ssam_notifier_unregister(ctrl, &vhf->notif); ++err_notif: ++ hid_destroy_device(hid); ++ platform_set_drvdata(pdev, NULL); ++err_create_hid: ++ kfree(vhf); ++ return status; ++} ++ ++static int surface_sam_sid_vhf_remove(struct platform_device *pdev) ++{ ++ struct sid_vhf *vhf = platform_get_drvdata(pdev); ++ ++ ssam_notifier_unregister(vhf->ctrl, &vhf->notif); ++ hid_destroy_device(vhf->hid); ++ kfree(vhf); ++ ++ platform_set_drvdata(pdev, NULL); ++ return 0; ++} ++ ++static struct platform_driver surface_sam_sid_vhf = { ++ .probe = surface_sam_sid_vhf_probe, ++ .remove = surface_sam_sid_vhf_remove, ++ .driver = { ++ .name = "surface_sam_sid_vhf", ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++module_platform_driver(surface_sam_sid_vhf); ++ ++MODULE_AUTHOR("Blaž Hrastnik "); ++MODULE_DESCRIPTION("Driver for HID devices connected via Surface SAM"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:surface_sam_sid_vhf"); +diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h +new file mode 100644 +index 0000000000000..d956de5cf877a +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.h +@@ -0,0 +1,14 @@ ++ ++#ifndef _SURFACE_SAM_SID_VHF_H ++#define _SURFACE_SAM_SID_VHF_H ++ ++#include ++#include "surface_sam_ssh.h" ++ ++ ++struct ssam_hid_properties { ++ struct ssam_event_registry registry; ++ u8 instance; ++}; ++ ++#endif /* _SURFACE_SAM_SID_VHF_H */ +diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.c b/drivers/platform/x86/surface_sam/surface_sam_ssh.c +new file mode 100644 +index 0000000000000..4551b75570f22 +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.c +@@ -0,0 +1,5329 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Surface Serial Hub (SSH) driver for communication with the Surface/System ++ * Aggregator Module. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "surface_sam_ssh.h" ++ ++#define CREATE_TRACE_POINTS ++#include "surface_sam_ssh_trace.h" ++ ++ ++/* -- Error injection helpers. 
---------------------------------------------- */ ++ ++#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION ++#define noinline_if_inject noinline ++#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++#define noinline_if_inject inline ++#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++ ++ ++/* -- SSH protocol utility functions and definitions. ----------------------- */ ++ ++/* ++ * The number of reserved event IDs, used for registering an SSH event ++ * handler. Valid event IDs are numbers below or equal to this value, with ++ * exception of zero, which is not an event ID. Thus, this is also the ++ * absolute maximum number of event handlers that can be registered. ++ */ ++#define SSH_NUM_EVENTS 34 ++ ++/* ++ * The number of communication channels used in the protocol. ++ */ ++#define SSH_NUM_CHANNELS 2 ++ ++ ++static inline u16 ssh_crc(const u8 *buf, size_t len) ++{ ++ return crc_ccitt_false(0xffff, buf, len); ++} ++ ++static inline u16 ssh_rqid_next_valid(u16 rqid) ++{ ++ return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u; ++} ++ ++static inline u16 ssh_rqid_to_event(u16 rqid) ++{ ++ return rqid - 1u; ++} ++ ++static inline bool ssh_rqid_is_event(u16 rqid) ++{ ++ return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS; ++} ++ ++static inline int ssh_tc_to_rqid(u8 tc) ++{ ++ return tc; ++} ++ ++static inline u8 ssh_channel_to_index(u8 channel) ++{ ++ return channel - 1u; ++} ++ ++static inline bool ssh_channel_is_valid(u8 channel) ++{ ++ return ssh_channel_to_index(channel) < SSH_NUM_CHANNELS; ++} ++ ++ ++/* -- Safe counters. -------------------------------------------------------- */ ++ ++struct ssh_seq_counter { ++ u8 value; ++}; ++ ++struct ssh_rqid_counter { ++ u16 value; ++}; ++ ++static inline void ssh_seq_reset(struct ssh_seq_counter *c) ++{ ++ WRITE_ONCE(c->value, 0); ++} ++ ++static inline u8 ssh_seq_next(struct ssh_seq_counter *c) ++{ ++ u8 old = READ_ONCE(c->value); ++ u8 new = old + 1; ++ u8 ret; ++ ++ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) { ++ old = ret; ++ new = old + 1; ++ } ++ ++ return old; ++} ++ ++static inline void ssh_rqid_reset(struct ssh_rqid_counter *c) ++{ ++ WRITE_ONCE(c->value, 0); ++} ++ ++static inline u16 ssh_rqid_next(struct ssh_rqid_counter *c) ++{ ++ u16 old = READ_ONCE(c->value); ++ u16 new = ssh_rqid_next_valid(old); ++ u16 ret; ++ ++ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) { ++ old = ret; ++ new = ssh_rqid_next_valid(old); ++ } ++ ++ return old; ++} ++ ++ ++/* -- Builder functions for SAM-over-SSH messages. 
-------------------------- */ ++ ++struct msgbuf { ++ u8 *begin; ++ u8 *end; ++ u8 *ptr; ++}; ++ ++static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap) ++{ ++ msgb->begin = ptr; ++ msgb->end = ptr + cap; ++ msgb->ptr = ptr; ++} ++ ++static inline size_t msgb_bytes_used(const struct msgbuf *msgb) ++{ ++ return msgb->ptr - msgb->begin; ++} ++ ++static inline void msgb_push_u16(struct msgbuf *msgb, u16 value) ++{ ++ if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end)) ++ return; ++ ++ put_unaligned_le16(value, msgb->ptr); ++ msgb->ptr += sizeof(u16); ++} ++ ++static inline void msgb_push_syn(struct msgbuf *msgb) ++{ ++ msgb_push_u16(msgb, SSH_MSG_SYN); ++} ++ ++static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len) ++{ ++ msgb->ptr = memcpy(msgb->ptr, buf, len) + len; ++} ++ ++static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len) ++{ ++ msgb_push_u16(msgb, ssh_crc(buf, len)); ++} ++ ++static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq) ++{ ++ struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr; ++ const u8 *const begin = msgb->ptr; ++ ++ if (WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end)) ++ return; ++ ++ frame->type = ty; ++ put_unaligned_le16(len, &frame->len); ++ frame->seq = seq; ++ ++ msgb->ptr += sizeof(*frame); ++ msgb_push_crc(msgb, begin, msgb->ptr - begin); ++} ++ ++static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq) ++{ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // ACK-type frame + CRC ++ msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq); ++ ++ // payload CRC (ACK-type frames do not have a payload) ++ msgb_push_crc(msgb, msgb->ptr, 0); ++} ++ ++static inline void msgb_push_nak(struct msgbuf *msgb) ++{ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // NAK-type frame + CRC ++ msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00); ++ ++ // payload CRC (ACK-type frames do not have a payload) ++ msgb_push_crc(msgb, msgb->ptr, 0); ++} ++ ++static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid, ++ const struct ssam_request *rqst) ++{ ++ struct ssh_command *cmd; ++ const u8 *cmd_begin; ++ const u8 type = SSH_FRAME_TYPE_DATA_SEQ; ++ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // command frame + crc ++ msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->length, seq); ++ ++ // frame payload: command struct + payload ++ if (WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end)) ++ return; ++ ++ cmd_begin = msgb->ptr; ++ cmd = (struct ssh_command *)msgb->ptr; ++ ++ cmd->type = SSH_PLD_TYPE_CMD; ++ cmd->tc = rqst->target_category; ++ cmd->chn_out = rqst->channel; ++ cmd->chn_in = 0x00; ++ cmd->iid = rqst->instance_id; ++ put_unaligned_le16(rqid, &cmd->rqid); ++ cmd->cid = rqst->command_id; ++ ++ msgb->ptr += sizeof(*cmd); ++ ++ // command payload ++ msgb_push_buf(msgb, rqst->payload, rqst->length); ++ ++ // crc for command struct + payload ++ msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin); ++} ++ ++ ++/* -- Parser functions and utilities for SAM-over-SSH messages. 
------------- */ ++ ++struct sshp_buf { ++ u8 *ptr; ++ size_t len; ++ size_t cap; ++}; ++ ++ ++static inline bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc) ++{ ++ u16 actual = ssh_crc(src->ptr, src->len); ++ u16 expected = get_unaligned_le16(crc); ++ ++ return actual == expected; ++} ++ ++static bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem) ++{ ++ size_t i; ++ ++ for (i = 0; i < src->len - 1; i++) { ++ if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) { ++ rem->ptr = src->ptr + i; ++ rem->len = src->len - i; ++ return true; ++ } ++ } ++ ++ if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) { ++ rem->ptr = src->ptr + src->len - 1; ++ rem->len = 1; ++ return false; ++ } else { ++ rem->ptr = src->ptr + src->len; ++ rem->len = 0; ++ return false; ++ } ++} ++ ++static bool sshp_starts_with_syn(const struct ssam_span *src) ++{ ++ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN; ++} ++ ++static int sshp_parse_frame(const struct device *dev, ++ const struct ssam_span *source, ++ struct ssh_frame **frame, ++ struct ssam_span *payload, ++ size_t maxlen) ++{ ++ struct ssam_span sf; ++ struct ssam_span sp; ++ ++ // initialize output ++ *frame = NULL; ++ payload->ptr = NULL; ++ payload->len = 0; ++ ++ if (!sshp_starts_with_syn(source)) { ++ dev_warn(dev, "rx: parser: invalid start of frame\n"); ++ return -ENOMSG; ++ } ++ ++ // check for minumum packet length ++ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) { ++ dev_dbg(dev, "rx: parser: not enough data for frame\n"); ++ return 0; ++ } ++ ++ // pin down frame ++ sf.ptr = source->ptr + sizeof(u16); ++ sf.len = sizeof(struct ssh_frame); ++ ++ // validate frame CRC ++ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) { ++ dev_warn(dev, "rx: parser: invalid frame CRC\n"); ++ return -EBADMSG; ++ } ++ ++ // ensure packet does not exceed maximum length ++ if (unlikely(((struct ssh_frame *)sf.ptr)->len > maxlen)) { ++ dev_warn(dev, "rx: parser: frame too large: %u bytes\n", ++ ((struct ssh_frame *)sf.ptr)->len); ++ return -EMSGSIZE; ++ } ++ ++ // pin down payload ++ sp.ptr = sf.ptr + sf.len + sizeof(u16); ++ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len); ++ ++ // check for frame + payload length ++ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) { ++ dev_dbg(dev, "rx: parser: not enough data for payload\n"); ++ return 0; ++ } ++ ++ // validate payload crc ++ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) { ++ dev_warn(dev, "rx: parser: invalid payload CRC\n"); ++ return -EBADMSG; ++ } ++ ++ *frame = (struct ssh_frame *)sf.ptr; ++ *payload = sp; ++ ++ dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n", ++ (*frame)->type, (*frame)->len); ++ ++ return 0; ++} ++ ++static int sshp_parse_command(const struct device *dev, ++ const struct ssam_span *source, ++ struct ssh_command **command, ++ struct ssam_span *command_data) ++{ ++ // check for minimum length ++ if (unlikely(source->len < sizeof(struct ssh_command))) { ++ *command = NULL; ++ command_data->ptr = NULL; ++ command_data->len = 0; ++ ++ dev_err(dev, "rx: parser: command payload is too short\n"); ++ return -ENOMSG; ++ } ++ ++ *command = (struct ssh_command *)source->ptr; ++ command_data->ptr = source->ptr + sizeof(struct ssh_command); ++ command_data->len = source->len - sizeof(struct ssh_command); ++ ++ dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x," ++ " cid: 0x%02x)\n", (*command)->tc, (*command)->cid); ++ ++ return 0; ++} ++ ++ ++static inline void 
sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
++{
++	buf->ptr = ptr;
++	buf->len = 0;
++	buf->cap = cap;
++}
++
++static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
++{
++	u8 *ptr;
++
++	ptr = kzalloc(cap, flags);
++	if (!ptr)
++		return -ENOMEM;
++
++	sshp_buf_init(buf, ptr, cap);
++	return 0;
++
++}
++
++static inline void sshp_buf_free(struct sshp_buf *buf)
++{
++	kfree(buf->ptr);
++	buf->ptr = NULL;
++	buf->len = 0;
++	buf->cap = 0;
++}
++
++static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
++{
++	memmove(buf->ptr, buf->ptr + n, buf->len - n);
++	buf->len -= n;
++}
++
++static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
++					     struct kfifo *fifo)
++{
++	size_t n;
++
++	n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
++	buf->len += n;
++
++	return n;
++}
++
++static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
++				      struct ssam_span *span)
++{
++	span->ptr = buf->ptr + offset;
++	span->len = buf->len - offset;
++}
++
++
++/* -- Packet transport layer (ptl). ----------------------------------------- */
++/*
++ * To simplify reasoning about the code below, we define a few concepts. The
++ * system below is similar to a state-machine for packets, however, there are
++ * too many states to explicitly write them down. To (somewhat) manage the
++ * states and packets we rely on flags, reference counting, and some simple
++ * concepts. State transitions are triggered by actions.
++ *
++ * >> Actions <<
++ *
++ * - submit
++ * - transmission start (process next item in queue)
++ * - transmission finished (guaranteed to never be parallel to transmission
++ *   start)
++ * - ACK received
++ * - NAK received (this is equivalent to issuing re-submit for all pending
++ *   packets)
++ * - timeout (this is equivalent to re-issuing a submit or canceling)
++ * - cancel (non-pending and pending)
++ *
++ * >> Data Structures, Packet Ownership, General Overview <<
++ *
++ * The code below employs two main data structures: The packet queue, containing
++ * all packets scheduled for transmission, and the set of pending packets,
++ * containing all packets awaiting an ACK.
++ *
++ * Shared ownership of a packet is controlled via reference counting. Inside the
++ * transmission system are a total of five packet owners:
++ *
++ * - the packet queue,
++ * - the pending set,
++ * - the transmitter thread,
++ * - the receiver thread (via ACKing), and
++ * - the timeout work item.
++ *
++ * Normal operation is as follows: The initial reference of the packet is
++ * obtained by submitting the packet and queueing it. The transmitter thread
++ * takes packets from the queue. By doing this, it does not increment the
++ * refcount but takes over the reference (removing it from the queue).
++ * If the packet is sequenced (i.e. needs to be ACKed by the client), the
++ * transmitter thread sets up the timeout and adds the packet to the pending set
++ * before starting to transmit it. As the timeout is handled by a reaper task,
++ * no additional reference for it is needed. After the transmit is done, the
++ * reference held by the transmitter thread is dropped. If the packet is
++ * unsequenced (i.e. does not need an ACK), the packet is completed by the
++ * transmitter thread before dropping that reference.
++ *
++ * On receipt of an ACK, the receiver thread removes and obtains the reference
++ * to the packet from the pending set. On success, the receiver thread will
++ * then complete the packet and drop its reference.
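++ *
++ * On receipt of a NAK, all packets currently in the pending set are
++ * re-submitted, i.e. returned to the queue for re-transmission, as noted in
++ * the list of actions above.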
++ *
++ * On error, the completion callback is immediately run on the thread on which
++ * the error was detected.
++ *
++ * To ensure that a packet eventually leaves the system it is marked as "locked"
++ * directly before it is going to be completed or when it is canceled. Marking a
++ * packet as "locked" has the effect that passing and creating new references
++ * of the packet will be blocked. This means that the packet cannot be added
++ * to the queue, the pending set, or the timeout, or be picked up by the
++ * transmitter thread or receiver thread. To remove a packet from the system it
++ * has to be marked as locked and subsequently all references from the data
++ * structures (queue, pending) have to be removed. References held by threads
++ * will eventually be dropped automatically as their execution progresses.
++ *
++ * Note that the packet completion callback is, in case of success and for a
++ * sequenced packet, guaranteed to run on the receiver thread, thus providing a
++ * way to reliably identify responses to the packet. The packet completion
++ * callback is only run once and it does not indicate that the packet has fully
++ * left the system. In case of re-submission (and with somewhat unlikely
++ * timing), it may be possible that the packet is being re-transmitted while the
++ * completion callback runs. Completion will occur both on success and internal
++ * error, as well as when the packet is canceled.
++ *
++ * >> Flags <<
++ *
++ * Flags are used to indicate the state and progression of a packet. Some flags
++ * have stricter guarantees than others:
++ *
++ * - locked
++ *   Indicates if the packet is locked. If the packet is locked, passing and/or
++ *   creating additional references to the packet is forbidden. The packet thus
++ *   may not be queued, dequeued, or removed or added to the pending set. Note
++ *   that the packet state flags may still change (e.g. it may be marked as
++ *   ACKed, transmitted, ...).
++ *
++ * - completed
++ *   Indicates if the packet completion has been run or is about to be run. This
++ *   flag is used to ensure that the packet completion callback is only run
++ *   once.
++ *
++ * - queued
++ *   Indicates if a packet is present in the submission queue or not. This flag
++ *   must only be modified with the queue lock held, and must be coherent with
++ *   the presence of the packet in the queue.
++ *
++ * - pending
++ *   Indicates if a packet is present in the set of pending packets or not.
++ *   This flag must only be modified with the pending lock held, and must be
++ *   coherent with the presence of the packet in the pending set.
++ *
++ * - transmitting
++ *   Indicates if the packet is currently transmitting. In case of
++ *   re-transmissions, it is only safe to wait on the "transmitted" completion
++ *   after this flag has been set. The completion will be set both in the
++ *   success and the error case.
++ *
++ * - transmitted
++ *   Indicates if the packet has been transmitted. This flag is not cleared by
++ *   the system, thus it indicates the first transmission only.
++ *
++ * - acked
++ *   Indicates if the packet has been acknowledged by the client. There are no
++ *   other guarantees given. For example, the packet may still be canceled
++ *   and/or the completion may be triggered with an error even though this bit
++ *   is set. Rely on the status provided by completion instead.
++ *
++ * - canceled
++ *   Indicates if the packet has been canceled from the outside. There are no
++ *   other guarantees given.
++ *
++ * >> General Notes <<
++ *
++ * To avoid deadlocks, if both queue and pending locks are required, the pending
++ * lock must be acquired before the queue lock.
++ */
++
++/**
++ * Maximum number of transmission attempts per sequenced packet in case of
++ * time-outs. Must be smaller than 16.
++ */
++#define SSH_PTL_MAX_PACKET_TRIES 3
++
++/**
++ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
++ * time-frame after starting transmission, the packet will be re-submitted.
++ */
++#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
++
++/**
++ * Maximum time resolution for timeouts. Currently set to max(2 jiffies, 50ms).
++ * Should be larger than one jiffy to avoid direct re-scheduling of reaper
++ * work_struct.
++ */
++#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
++
++/**
++ * Maximum number of sequenced packets concurrently waiting for an ACK.
++ * Packets marked as blocking will not be transmitted once this limit is
++ * reached.
++ */
++#define SSH_PTL_MAX_PENDING 1
++
++#define SSH_PTL_RX_BUF_LEN 4096
++
++#define SSH_PTL_RX_FIFO_LEN 4096
++
++
++enum ssh_ptl_state_flags {
++	SSH_PTL_SF_SHUTDOWN_BIT,
++};
++
++struct ssh_ptl_ops {
++	void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
++};
++
++struct ssh_ptl {
++	struct serdev_device *serdev;
++	unsigned long state;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++	} queue;
++
++	struct {
++		spinlock_t lock;
++		struct list_head head;
++		atomic_t count;
++	} pending;
++
++	struct {
++		bool thread_signal;
++		struct task_struct *thread;
++		struct wait_queue_head thread_wq;
++		struct wait_queue_head packet_wq;
++		struct ssh_packet *packet;
++		size_t offset;
++	} tx;
++
++	struct {
++		struct task_struct *thread;
++		struct wait_queue_head wq;
++		struct kfifo fifo;
++		struct sshp_buf buf;
++
++		struct {
++			u16 seqs[8];
++			u16 offset;
++		} blocked;
++	} rx;
++
++	struct {
++		ktime_t timeout;
++		ktime_t expires;
++		struct delayed_work reaper;
++	} rtx_timeout;
++
++	struct ssh_ptl_ops ops;
++};
++
++
++#define __ssam_prcond(func, p, fmt, ...)		\
++	do {						\
++		if ((p))				\
++			func((p), fmt, ##__VA_ARGS__);	\
++	} while (0)
++
++#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
++#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
++
++#define to_ssh_packet(ptr, member) \
++	container_of(ptr, struct ssh_packet, member)
++
++#define to_ssh_ptl(ptr, member) \
++	container_of(ptr, struct ssh_ptl, member)
++
++
++#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
++
++/**
++ * ssh_ptl_should_drop_ack_packet - error injection hook to drop ACK packets
++ *
++ * Useful to test detection and handling of automated re-transmits by the EC.
++ * Specifically, packets that the EC considers not-ACKed but the driver
++ * already considers ACKed (due to a dropped ACK). In this case, the EC
++ * re-transmits the packet-to-be-ACKed and the driver should detect it as
++ * duplicate/already handled. Note that the driver should still send an ACK
++ * for the re-transmitted packet.
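++ *
++ * A hypothetical usage sketch via the kernel's generic fail_function
++ * error-injection facility (editorial addition, not part of this driver;
++ * requires CONFIG_FUNCTION_ERROR_INJECTION and debugfs, exact paths may
++ * differ by kernel version):
++ *
++ *	echo ssh_ptl_should_drop_ack_packet > /sys/kernel/debug/fail_function/inject
++ *	echo 1 > /sys/kernel/debug/fail_function/ssh_ptl_should_drop_ack_packet/retval
++ *	echo 100 > /sys/kernel/debug/fail_function/probability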
++ */
++static noinline bool ssh_ptl_should_drop_ack_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_nak_packet - error injection hook to drop NAK packets
++ *
++ * Useful to test/force automated (timeout-based) re-transmit by the EC.
++ * Specifically, packets that have not reached the driver completely/with valid
++ * checksums. Only useful in combination with reception of (injected) bad data.
++ */
++static noinline bool ssh_ptl_should_drop_nak_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
++
++/**
++ * ssh_ptl_should_drop_dsq_packet - error injection hook to drop sequenced data
++ * packet
++ *
++ * Useful to test re-transmit timeout of the driver. If the data packet has not
++ * been ACKed after a certain time, the driver should re-transmit the packet up
++ * to the limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
++ */
++static noinline bool ssh_ptl_should_drop_dsq_packet(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
++
++/**
++ * ssh_ptl_should_fail_write - error injection hook to make
++ * serdev_device_write_buf fail
++ *
++ * Hook to simulate errors in serdev_device_write_buf when transmitting packets.
++ */
++static noinline int ssh_ptl_should_fail_write(void)
++{
++	return 0;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
++
++/**
++ * ssh_ptl_should_corrupt_tx_data - error injection hook to simulate invalid
++ * data being sent to the EC
++ *
++ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
++ * Causes the packet data to be actively corrupted by overwriting it with
++ * pre-defined values, such that it becomes invalid, causing the EC to respond
++ * with a NAK packet. Useful to test handling of NAK packets received by the
++ * driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_tx_data(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_syn - error injection hook to simulate invalid
++ * data being sent by the EC
++ *
++ * Hook to simulate invalid SYN bytes, i.e. an invalid start of a message, and
++ * test handling thereof in the driver.
++ */
++static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
++{
++	return false;
++}
++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
++
++/**
++ * ssh_ptl_should_corrupt_rx_data - error injection hook to simulate invalid
++ * data being sent by the EC
++ *
++ * Hook to simulate invalid data/checksum of the message frame and test handling
++ * thereof in the driver.
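++ *
++ * (Editorial note: when this hook returns true,
++ * ssh_ptl_rx_inject_invalid_data below flips bits in the frame's payload
++ * checksum, so the receive path is expected to reject the frame, send a
++ * NAK, and skip ahead to the next SYN; see ssh_ptl_rx_eval.)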
++ */ ++static noinline bool ssh_ptl_should_corrupt_rx_data(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE); ++ ++ ++static inline bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_ack_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_ack_packet(packet); ++ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static inline bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_nak_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_nak_packet(packet); ++ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static inline bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_dsq_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_dsq_packet(packet); ++ ptl_info(packet->ptl, ++ "packet error injection: dropping sequenced data packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) ++{ ++ // ignore packets that don't carry any data (i.e. flush) ++ if (!packet->data.ptr || !packet->data.len) ++ return false; ++ ++ switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) { ++ case SSH_FRAME_TYPE_ACK: ++ return __ssh_ptl_should_drop_ack_packet(packet); ++ ++ case SSH_FRAME_TYPE_NAK: ++ return __ssh_ptl_should_drop_nak_packet(packet); ++ ++ case SSH_FRAME_TYPE_DATA_SEQ: ++ return __ssh_ptl_should_drop_dsq_packet(packet); ++ ++ default: ++ return false; ++ } ++} ++ ++static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet, ++ const unsigned char *buf, size_t count) ++{ ++ int status; ++ ++ status = ssh_ptl_should_fail_write(); ++ if (unlikely(status)) { ++ trace_ssam_ei_tx_fail_write(packet, status); ++ ptl_info(packet->ptl, ++ "packet error injection: simulating transmit error %d, packet %p\n", ++ status, packet); ++ ++ return status; ++ } ++ ++ return serdev_device_write_buf(ptl->serdev, buf, count); ++} ++ ++static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) ++{ ++ // ignore packets that don't carry any data (i.e. flush) ++ if (!packet->data.ptr || !packet->data.len) ++ return; ++ ++ // only allow sequenced data packets to be modified ++ if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_tx_data())) ++ return; ++ ++ trace_ssam_ei_tx_corrupt_data(packet); ++ ptl_info(packet->ptl, ++ "packet error injection: simulating invalid transmit data on packet %p\n", ++ packet); ++ ++ /* ++ * NB: The value 0xb3 has been chosen more or less randomly so that it ++ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is ++ * non-trivial (i.e. non-zero, non-0xff). 
++ */ ++ memset(packet->data.ptr, 0xb3, packet->data.len); ++} ++ ++static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, ++ struct ssam_span *data) ++{ ++ struct ssam_span frame; ++ ++ // check if there actually is something to corrupt ++ if (!sshp_find_syn(data, &frame)) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_rx_syn())) ++ return; ++ ++ trace_ssam_ei_rx_corrupt_syn("data_length", data->len); ++ ++ data->ptr[1] = 0xb3; // set second byte of SYN to "random" value ++} ++ ++static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, ++ struct ssam_span *frame) ++{ ++ size_t payload_len, message_len; ++ struct ssh_frame *sshf; ++ ++ // ignore incomplete messages, will get handled once it's complete ++ if (frame->len < SSH_MESSAGE_LENGTH(0)) ++ return; ++ ++ // ignore incomplete messages, part 2 ++ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]); ++ message_len = SSH_MESSAGE_LENGTH(payload_len); ++ if (frame->len < message_len) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_rx_data())) ++ return; ++ ++ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)]; ++ trace_ssam_ei_rx_corrupt_data(sshf); ++ ++ /* ++ * Flip bits in first byte of payload checksum. This is basically ++ * equivalent to a payload/frame data error without us having to worry ++ * about (the, arguably pretty small, probability of) accidental ++ * checksum collisions. ++ */ ++ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2]; ++} ++ ++#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++ ++static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) ++{ ++ return false; ++} ++ ++static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl, ++ struct ssh_packet *packet, ++ const unsigned char *buf, ++ size_t count) ++{ ++ return serdev_device_write_buf(ptl->serdev, buf, count); ++} ++ ++static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) ++{ ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, ++ struct ssam_span *data) ++{ ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, ++ struct ssam_span *frame) ++{ ++} ++ ++#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++ ++ ++static void __ssh_ptl_packet_release(struct kref *kref) ++{ ++ struct ssh_packet *p = to_ssh_packet(kref, refcnt); ++ ++ trace_ssam_packet_release(p); ++ ++ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p); ++ p->ops->release(p); ++} ++ ++void ssh_packet_get(struct ssh_packet *packet) ++{ ++ kref_get(&packet->refcnt); ++} ++EXPORT_SYMBOL_GPL(ssh_packet_get); ++ ++void ssh_packet_put(struct ssh_packet *packet) ++{ ++ kref_put(&packet->refcnt, __ssh_ptl_packet_release); ++} ++EXPORT_SYMBOL_GPL(ssh_packet_put); ++ ++static inline u8 ssh_packet_get_seq(struct ssh_packet *packet) ++{ ++ return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)]; ++} ++ ++ ++struct ssh_packet_args { ++ unsigned long type; ++ u8 priority; ++ const struct ssh_packet_ops *ops; ++}; ++ ++static void ssh_packet_init(struct ssh_packet *packet, ++ const struct ssh_packet_args *args) ++{ ++ kref_init(&packet->refcnt); ++ ++ packet->ptl = NULL; ++ INIT_LIST_HEAD(&packet->queue_node); ++ INIT_LIST_HEAD(&packet->pending_node); ++ ++ packet->state = args->type & SSH_PACKET_FLAGS_TY_MASK; ++ packet->priority = args->priority; ++ packet->timestamp = KTIME_MAX; ++ ++ packet->data.ptr = NULL; ++ packet->data.len = 0; ++ ++ packet->ops = args->ops; ++} ++ ++ ++static struct kmem_cache *ssh_ctrl_packet_cache; ++ ++static int __init 
ssh_ctrl_packet_cache_init(void)
++{
++	const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
++	const unsigned int align = __alignof__(struct ssh_packet);
++	struct kmem_cache *cache;
++
++	cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
++	if (!cache)
++		return -ENOMEM;
++
++	ssh_ctrl_packet_cache = cache;
++	return 0;
++}
++
++static void __exit ssh_ctrl_packet_cache_destroy(void)
++{
++	kmem_cache_destroy(ssh_ctrl_packet_cache);
++	ssh_ctrl_packet_cache = NULL;
++}
++
++static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
++				 struct ssam_span *buffer, gfp_t flags)
++{
++	*packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
++	if (!*packet)
++		return -ENOMEM;
++
++	buffer->ptr = (u8 *)(*packet + 1);
++	buffer->len = SSH_MSG_LEN_CTRL;
++
++	trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
++	return 0;
++}
++
++static void ssh_ctrl_packet_free(struct ssh_packet *p)
++{
++	trace_ssam_ctrl_packet_free(p);
++	kmem_cache_free(ssh_ctrl_packet_cache, p);
++}
++
++static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
++	.complete = NULL,
++	.release = ssh_ctrl_packet_free,
++};
++
++
++static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
++				       ktime_t expires)
++{
++	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
++	ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
++	ktime_t old;
++
++	// re-adjust / schedule reaper if it is above resolution delta
++	old = READ_ONCE(ptl->rtx_timeout.expires);
++	while (ktime_before(aexp, old))
++		old = cmpxchg64(&ptl->rtx_timeout.expires, old, expires);
++
++	// if we updated the reaper expiration, modify work timeout
++	if (old == expires)
++		mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
++}
++
++static void ssh_ptl_timeout_start(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	ktime_t timestamp = ktime_get_coarse_boottime();
++	ktime_t timeout = ptl->rtx_timeout.timeout;
++
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
++		return;
++
++	WRITE_ONCE(packet->timestamp, timestamp);
++	smp_mb__after_atomic();
++
++	ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout);
++}
++
++
++static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
++{
++	struct list_head *head;
++	u8 priority = READ_ONCE(p->priority);
++
++	/*
++	 * We generally assume that there are fewer control (ACK/NAK) packets
++	 * and re-submitted data packets than there are normal data packets (at
++	 * least in situations in which many packets are queued; if there
++	 * aren't many packets queued the decision on how to iterate should be
++	 * basically irrelevant; the number of control/data packets is more or
++	 * less limited via the maximum number of pending packets). Thus, when
++	 * inserting a control or re-submitted data packet (determined by its
++	 * priority), we search from front to back. Normal data packets are
++	 * usually queued directly at the tail of the queue, so for those we
++	 * search from back to front.
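++	 *
++	 * As a schematic example (editorial illustration, priority values
++	 * are made up): given a queue
++	 *
++	 *	[ ACK(prio 8), DATA_resub(prio 5), DATA(prio 4), DATA(prio 4) ]
++	 *
++	 * a new ACK is inserted by scanning from the front and stopping at
++	 * the first entry with a lower priority, while a fresh DATA packet
++	 * scans from the back and is placed after the last entry with a
++	 * priority greater than or equal to its own.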
++	 */
++
++	if (priority > SSH_PACKET_PRIORITY_DATA) {
++		list_for_each(head, &p->ptl->queue.head) {
++			p = list_entry(head, struct ssh_packet, queue_node);
++
++			if (READ_ONCE(p->priority) < priority)
++				break;
++		}
++	} else {
++		list_for_each_prev(head, &p->ptl->queue.head) {
++			p = list_entry(head, struct ssh_packet, queue_node);
++
++			if (READ_ONCE(p->priority) >= priority) {
++				head = head->next;
++				break;
++			}
++		}
++	}
++
++	return head;
++}
++
++static int ssh_ptl_queue_push(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	struct list_head *head;
++
++	spin_lock(&ptl->queue.lock);
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) {
++		spin_unlock(&ptl->queue.lock);
++		return -ESHUTDOWN;
++	}
++
++	// avoid further transitions when cancelling/completing
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
++		spin_unlock(&ptl->queue.lock);
++		return -EINVAL;
++	}
++
++	// if this packet has already been queued, do not add it
++	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
++		spin_unlock(&ptl->queue.lock);
++		return -EALREADY;
++	}
++
++	head = __ssh_ptl_queue_find_entrypoint(packet);
++
++	ssh_packet_get(packet);
++	list_add_tail(&packet->queue_node, head);
++
++	spin_unlock(&ptl->queue.lock);
++	return 0;
++}
++
++static void ssh_ptl_queue_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	bool remove;
++
++	spin_lock(&ptl->queue.lock);
++
++	remove = test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state);
++	if (remove)
++		list_del(&packet->queue_node);
++
++	spin_unlock(&ptl->queue.lock);
++
++	if (remove)
++		ssh_packet_put(packet);
++}
++
++
++static void ssh_ptl_pending_push(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	spin_lock(&ptl->pending.lock);
++
++	// if we are cancelling/completing this packet, do not add it
++	if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	// in case it is already pending (e.g. re-submission), do not add it
++	if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
++		spin_unlock(&ptl->pending.lock);
++		return;
++	}
++
++	atomic_inc(&ptl->pending.count);
++	ssh_packet_get(packet);
++	list_add_tail(&packet->pending_node, &ptl->pending.head);
++
++	spin_unlock(&ptl->pending.lock);
++}
++
++static void ssh_ptl_pending_remove(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++	bool remove;
++
++	spin_lock(&ptl->pending.lock);
++
++	remove = test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state);
++	if (remove) {
++		list_del(&packet->pending_node);
++		atomic_dec(&ptl->pending.count);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	if (remove)
++		ssh_packet_put(packet);
++}
++
++
++static void __ssh_ptl_complete(struct ssh_packet *p, int status)
++{
++	struct ssh_ptl *ptl = READ_ONCE(p->ptl);
++
++	trace_ssam_packet_complete(p, status);
++
++	ptl_dbg_cond(ptl, "ptl: completing packet %p\n", p);
++	if (status && status != -ECANCELED)
++		ptl_dbg_cond(ptl, "ptl: packet error: %d\n", status);
++
++	if (p->ops->complete)
++		p->ops->complete(p, status);
++}
++
++static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
++{
++	/*
++	 * A call to this function should in general be preceded by
++	 * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
++	 * packet to the structures it's going to be removed from.
++	 *
++	 * The set_bit call does not need explicit memory barriers as the
++	 * implicit barrier of the test_and_set_bit call below ensures that the
++	 * flag is visible before we actually attempt to remove the packet.
++	 */
++
++	if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++		return;
++
++	ssh_ptl_queue_remove(p);
++	ssh_ptl_pending_remove(p);
++
++	__ssh_ptl_complete(p, status);
++}
++
++
++static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
++		return !atomic_read(&ptl->pending.count);
++
++	// we can always process non-blocking packets
++	if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
++		return true;
++
++	// if we are already waiting for this packet, send it again
++	if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
++		return true;
++
++	// otherwise: check if we have the capacity to send
++	return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
++}
++
++static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		/*
++		 * If we are cancelling or completing this packet, ignore it.
++		 * It's going to be removed from this queue shortly.
++		 */
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		/*
++		 * Packets should be ordered non-blocking/to-be-resent first.
++		 * If we cannot process this packet, assume that we can't
++		 * process any following packet either and abort.
++		 */
++		if (!ssh_ptl_tx_can_process(p)) {
++			packet = ERR_PTR(-EBUSY);
++			break;
++		}
++
++		/*
++		 * We are allowed to change the state now. Remove it from the
++		 * queue and mark it as being transmitted. Note that we cannot
++		 * add it to the set of pending packets yet, as the pending
++		 * lock must always be acquired before the queue lock
++		 * (otherwise we might run into a deadlock).
++		 */
++
++		list_del(&p->queue_node);
++
++		/*
++		 * Ensure that the "queued" bit gets cleared after setting the
++		 * "transmitting" bit to guarantee non-zero flags.
++		 */
++		set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		packet = p;
++		break;
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	return packet;
++}
++
++static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++
++	p = ssh_ptl_tx_pop(ptl);
++	if (IS_ERR(p))
++		return p;
++
++	if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
++		ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
++		ssh_ptl_pending_push(p);
++		ssh_ptl_timeout_start(p);
++	} else {
++		ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
++	}
++
++	/*
++	 * Update number of tries. This directly influences the priority in case
++	 * the packet is re-submitted (e.g. via timeout/NAK). Note that this is
++	 * the only place where we update the priority in-flight. As this runs
++	 * only on the tx-thread, this read-modify-write procedure is safe.
++	 */
++	WRITE_ONCE(p->priority, READ_ONCE(p->priority) + 1);
++
++	return p;
++}
++
++static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
++{
++	struct ssh_ptl *ptl = packet->ptl;
++
++	ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
++
++	/*
++	 * Transition state to "transmitted". Ensure that the flags never get
++	 * zero with barrier.
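++	 *
++	 * (Editorial note on the idiom used here and throughout this file:
++	 *
++	 *	set_bit(NEW_BIT, &state);
++	 *	smp_mb__before_atomic();
++	 *	clear_bit(OLD_BIT, &state);
++	 *
++	 * setting the new bit before clearing the old one ensures that a
++	 * concurrent reader never observes a transient state in which
++	 * neither bit is set.)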
++ */ ++ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); ++ ++ // if the packet is unsequenced, we're done: lock and complete ++ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) { ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); ++ ssh_ptl_remove_and_complete(packet, 0); ++ } ++ ++ /* ++ * Notify that a packet transmission has finished. In general we're only ++ * waiting for one packet (if any), so wake_up_all should be fine. ++ */ ++ wake_up_all(&ptl->tx.packet_wq); ++} ++ ++static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status) ++{ ++ /* ++ * Transmission failure: Lock the packet and try to complete it. Ensure ++ * that the flags never get zero with barrier. ++ */ ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); ++ ++ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status); ++ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet); ++ ++ ssh_ptl_remove_and_complete(packet, status); ++ ++ /* ++ * Notify that a packet transmission has finished. In general we're only ++ * waiting for one packet (if any), so wake_up_all should be fine. ++ */ ++ wake_up_all(&packet->ptl->tx.packet_wq); ++} ++ ++static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl) ++{ ++ wait_event_interruptible(ptl->tx.thread_wq, ++ READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop()); ++ WRITE_ONCE(ptl->tx.thread_signal, false); ++} ++ ++static int ssh_ptl_tx_threadfn(void *data) ++{ ++ struct ssh_ptl *ptl = data; ++ ++ while (!kthread_should_stop()) { ++ unsigned char *buf; ++ bool drop = false; ++ size_t len = 0; ++ int status = 0; ++ ++ // if we don't have a packet, get the next and add it to pending ++ if (IS_ERR_OR_NULL(ptl->tx.packet)) { ++ ptl->tx.packet = ssh_ptl_tx_next(ptl); ++ ptl->tx.offset = 0; ++ ++ // if no packet is available, we are done ++ if (IS_ERR(ptl->tx.packet)) { ++ ssh_ptl_tx_threadfn_wait(ptl); ++ continue; ++ } ++ } ++ ++ // error injection: drop packet to simulate transmission problem ++ if (ptl->tx.offset == 0) ++ drop = ssh_ptl_should_drop_packet(ptl->tx.packet); ++ ++ // error injection: simulate invalid packet data ++ if (ptl->tx.offset == 0 && !drop) ++ ssh_ptl_tx_inject_invalid_data(ptl->tx.packet); ++ ++ // flush-packets don't have any data ++ if (likely(ptl->tx.packet->data.ptr && !drop)) { ++ buf = ptl->tx.packet->data.ptr + ptl->tx.offset; ++ len = ptl->tx.packet->data.len - ptl->tx.offset; ++ ++ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len); ++ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1, ++ buf, len, false); ++ ++ status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len); ++ } ++ ++ if (status < 0) { ++ // complete packet with error ++ ssh_ptl_tx_compl_error(ptl->tx.packet, status); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ ++ } else if (status == len) { ++ // complete packet and/or mark as transmitted ++ ssh_ptl_tx_compl_success(ptl->tx.packet); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ ++ } else { // need more buffer space ++ ptl->tx.offset += status; ++ ssh_ptl_tx_threadfn_wait(ptl); ++ } ++ } ++ ++ // cancel active packet before we actually stop ++ if (!IS_ERR_OR_NULL(ptl->tx.packet)) { ++ ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ } ++ ++ return 0; ++} ++ ++static inline void 
ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force)
++{
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return;
++
++	if (force || atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING) {
++		WRITE_ONCE(ptl->tx.thread_signal, true);
++		smp_mb__after_atomic();
++		wake_up(&ptl->tx.thread_wq);
++	}
++}
++
++static int ssh_ptl_tx_start(struct ssh_ptl *ptl)
++{
++	ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "surface-sh-tx");
++	if (IS_ERR(ptl->tx.thread))
++		return PTR_ERR(ptl->tx.thread);
++
++	return 0;
++}
++
++static int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (ptl->tx.thread) {
++		status = kthread_stop(ptl->tx.thread);
++		ptl->tx.thread = NULL;
++	}
++
++	return status;
++}
++
++
++static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
++{
++	struct ssh_packet *packet = ERR_PTR(-ENOENT);
++	struct ssh_packet *p, *n;
++
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		/*
++		 * We generally expect packets to be in order, so first packet
++		 * to be added to pending is first to be sent, is first to be
++		 * ACKed.
++		 */
++		if (unlikely(ssh_packet_get_seq(p) != seq_id))
++			continue;
++
++		/*
++		 * In case we receive an ACK while handling a transmission
++		 * error completion, the packet will be removed shortly.
++		 */
++		if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++			packet = ERR_PTR(-EPERM);
++			break;
++		}
++
++		/*
++		 * Mark packet as ACKed and remove it from pending. Ensure that
++		 * the flags never get zero with barrier.
++		 */
++		set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++		packet = p;
++
++		break;
++	}
++	spin_unlock(&ptl->pending.lock);
++
++	return packet;
++}
++
++static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
++{
++	wait_event(packet->ptl->tx.packet_wq,
++		   test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state)
++		   || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
++}
++
++static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet *p;
++	int status = 0;
++
++	p = ssh_ptl_ack_pop(ptl, seq);
++	if (IS_ERR(p)) {
++		if (PTR_ERR(p) == -ENOENT) {
++			/*
++			 * The packet has not been found in the set of pending
++			 * packets.
++			 */
++			ptl_warn(ptl, "ptl: received ACK for non-pending"
++				 " packet\n");
++		} else {
++			/*
++			 * The packet is pending, but we are not allowed to take
++			 * it because it has been locked.
++			 */
++		}
++		return;
++	}
++
++	ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
++
++	/*
++	 * It is possible that the packet has been transmitted, but the state
++	 * has not been updated from "transmitting" to "transmitted" yet.
++	 * In that case, we need to wait for this transition to occur in order
++	 * to distinguish between success and failure.
++	 */
++	if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state))
++		ssh_ptl_wait_until_transmitted(p);
++
++	/*
++	 * The packet will already be locked in case of a transmission error or
++	 * cancellation. Let the transmitter or cancellation issuer complete the
++	 * packet.
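++	 *
++	 * (Editorial sketch of the success path through this function:
++	 *
++	 *	pending --ACK--> acked --wait for tx--> locked --> completed(0)
++	 *
++	 * any deviation, e.g. an ACK arriving before the packet has been
++	 * fully transmitted, completes the packet with -EREMOTEIO instead.)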
++	 */
++	if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
++		ssh_packet_put(p);
++		return;
++	}
++
++	if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) {
++		ptl_err(ptl, "ptl: received ACK before packet had been fully"
++			" transmitted\n");
++		status = -EREMOTEIO;
++	}
++
++	ssh_ptl_remove_and_complete(p, status);
++	ssh_packet_put(p);
++
++	ssh_ptl_tx_wakeup(ptl, false);
++}
++
++
++static int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
++{
++	struct ssh_ptl *ptl_old;
++	int status;
++
++	trace_ssam_packet_submit(p);
++
++	// validate packet fields
++	if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
++		if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
++			return -EINVAL;
++	} else if (!p->data.ptr) {
++		return -EINVAL;
++	}
++
++	/*
++	 * The ptl reference only gets set on or before the first submission.
++	 * After the first submission, it has to be read-only.
++	 */
++	ptl_old = READ_ONCE(p->ptl);
++	if (ptl_old == NULL)
++		WRITE_ONCE(p->ptl, ptl);
++	else if (ptl_old != ptl)
++		return -EALREADY;
++
++	status = ssh_ptl_queue_push(p);
++	if (status)
++		return status;
++
++	ssh_ptl_tx_wakeup(ptl, !test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state));
++	return 0;
++}
++
++static void __ssh_ptl_resubmit(struct ssh_packet *packet)
++{
++	struct list_head *head;
++
++	trace_ssam_packet_resubmit(packet);
++
++	spin_lock(&packet->ptl->queue.lock);
++
++	// if this packet has already been queued, do not add it
++	if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
++		spin_unlock(&packet->ptl->queue.lock);
++		return;
++	}
++
++	// find first node with lower priority
++	head = __ssh_ptl_queue_find_entrypoint(packet);
++
++	WRITE_ONCE(packet->timestamp, KTIME_MAX);
++	smp_mb__after_atomic();
++
++	// add packet
++	ssh_packet_get(packet);
++	list_add_tail(&packet->queue_node, head);
++
++	spin_unlock(&packet->ptl->queue.lock);
++}
++
++static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
++{
++	struct ssh_packet *p;
++	bool resub = false;
++	u8 try;
++
++	/*
++	 * Note: We deliberately do not remove/attempt to cancel and complete
++	 * packets that are out of tries in this function. The packet will be
++	 * eventually canceled and completed by the timeout. Removing the packet
++	 * here could lead to overly eager cancellation if the packet has not
++	 * been re-transmitted yet but the tries-counter already updated (i.e.
++	 * ssh_ptl_tx_next removed the packet from the queue and updated the
++	 * counter, but re-transmission for the last try has not actually
++	 * started yet).
++	 */
++
++	spin_lock(&ptl->pending.lock);
++
++	// re-queue all pending packets
++	list_for_each_entry(p, &ptl->pending.head, pending_node) {
++		// avoid further transitions if locked
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		// do not re-schedule if packet is out of tries
++		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
++		if (try >= SSH_PTL_MAX_PACKET_TRIES)
++			continue;
++
++		resub = true;
++		__ssh_ptl_resubmit(p);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	ssh_ptl_tx_wakeup(ptl, resub);
++}
++
++static void ssh_ptl_cancel(struct ssh_packet *p)
++{
++	if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
++		return;
++
++	trace_ssam_packet_cancel(p);
++
++	/*
++	 * Lock packet and commit with memory barrier. If this packet has
++	 * already been locked, it's going to be removed and completed by
++	 * another party, which should have precedence.
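++	 *
++	 * (Editorial note: test_and_set_bit atomically sets the flag and
++	 * returns its previous value, so exactly one party wins the right
++	 * to complete the packet; all others simply return.)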
++	 */
++	if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++		return;
++
++	/*
++	 * By marking the packet as locked and employing the implicit memory
++	 * barrier of test_and_set_bit, we have guaranteed that, at this point,
++	 * the packet cannot be added to the queue any more.
++	 *
++	 * In case the packet has never been submitted, packet->ptl is NULL. If
++	 * the packet is currently being submitted, packet->ptl may be NULL or
++	 * non-NULL. Due to marking the packet as locked above and committing
++	 * with the memory barrier, we have guaranteed that, if packet->ptl is
++	 * NULL, the packet will never be added to the queue. If packet->ptl is
++	 * non-NULL, we don't have any guarantees.
++	 */
++
++	if (READ_ONCE(p->ptl)) {
++		ssh_ptl_remove_and_complete(p, -ECANCELED);
++		ssh_ptl_tx_wakeup(p->ptl, false);
++	} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++		__ssh_ptl_complete(p, -ECANCELED);
++	}
++}
++
++
++static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
++{
++	ktime_t timestamp = READ_ONCE(p->timestamp);
++
++	if (timestamp != KTIME_MAX)
++		return ktime_add(timestamp, timeout);
++	else
++		return KTIME_MAX;
++}
++
++static void ssh_ptl_timeout_reap(struct work_struct *work)
++{
++	struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
++	struct ssh_packet *p, *n;
++	LIST_HEAD(claimed);
++	ktime_t now = ktime_get_coarse_boottime();
++	ktime_t timeout = ptl->rtx_timeout.timeout;
++	ktime_t next = KTIME_MAX;
++	bool resub = false;
++
++	trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count));
++
++	/*
++	 * Mark reaper as "not pending". This is done before checking any
++	 * packets to avoid lost-update type problems.
++	 */
++	WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX);
++	smp_mb__after_atomic();
++
++	spin_lock(&ptl->pending.lock);
++
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		ktime_t expires = ssh_packet_get_expiration(p, timeout);
++		u8 try;
++
++		/*
++		 * Check if the timeout hasn't expired yet. Find out next
++		 * expiration date to be handled after this run.
++		 */
++		if (ktime_after(expires, now)) {
++			next = ktime_before(expires, next) ? expires : next;
++			continue;
++		}
++
++		// avoid further transitions if locked
++		if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		trace_ssam_packet_timeout(p);
++
++		// check if we still have some tries left
++		try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
++		if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) {
++			resub = true;
++			__ssh_ptl_resubmit(p);
++			continue;
++		}
++
++		// no more tries left: cancel the packet
++
++		// if someone else has locked the packet already, don't use it
++		if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
++			continue;
++
++		/*
++		 * We have now marked the packet as locked. Thus it cannot be
++		 * added to the pending list again after we've removed it here.
++		 * We can therefore re-use the pending_node of this packet
++		 * temporarily.
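++		 *
++		 * (Editorial illustration: entries are moved onto the local
++		 * "claimed" list while pending.lock is held, and are only
++		 * completed with -ETIMEDOUT after the lock has been dropped;
++		 * this claim-then-complete pattern keeps the user-supplied
++		 * completion callbacks from running under a spinlock.)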
++		 */
++
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		atomic_dec(&ptl->pending.count);
++		list_del(&p->pending_node);
++
++		list_add_tail(&p->pending_node, &claimed);
++	}
++
++	spin_unlock(&ptl->pending.lock);
++
++	// cancel and complete the packet
++	list_for_each_entry_safe(p, n, &claimed, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
++			ssh_ptl_queue_remove(p);
++			__ssh_ptl_complete(p, -ETIMEDOUT);
++		}
++
++		// drop the reference we've obtained by removing it from pending
++		list_del(&p->pending_node);
++		ssh_packet_put(p);
++	}
++
++	// ensure that reaper doesn't run again immediately
++	next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
++	if (next != KTIME_MAX)
++		ssh_ptl_timeout_reaper_mod(ptl, now, next);
++
++	// force-wakeup to properly handle re-transmits if we've re-submitted
++	ssh_ptl_tx_wakeup(ptl, resub);
++}
++
++
++static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
++{
++	int i;
++
++	// check if SEQ has been seen recently (i.e. packet was re-transmitted)
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
++		if (likely(ptl->rx.blocked.seqs[i] != seq))
++			continue;
++
++		ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
++		return true;
++	}
++
++	// update list of blocked sequence IDs
++	ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
++	ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
++				 % ARRAY_SIZE(ptl->rx.blocked.seqs);
++
++	return false;
++}
++
++static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
++				 const struct ssh_frame *frame,
++				 const struct ssam_span *payload)
++{
++	if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
++		return;
++
++	ptl->ops.data_received(ptl, payload);
++}
++
++static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
++{
++	struct ssh_packet_args args;
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
++		return;
++	}
++
++	args.type = 0;
++	args.priority = SSH_PACKET_PRIORITY(ACK, 0);
++	args.ops = &ssh_ptl_ctrl_packet_ops;
++	ssh_packet_init(packet, &args);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_ack(&msgb, seq);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
++{
++	struct ssh_packet_args args;
++	struct ssh_packet *packet;
++	struct ssam_span buf;
++	struct msgbuf msgb;
++	int status;
++
++	status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
++	if (status) {
++		ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
++		return;
++	}
++
++	args.type = 0;
++	args.priority = SSH_PACKET_PRIORITY(NAK, 0);
++	args.ops = &ssh_ptl_ctrl_packet_ops;
++	ssh_packet_init(packet, &args);
++
++	msgb_init(&msgb, buf.ptr, buf.len);
++	msgb_push_nak(&msgb);
++	ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
++
++	ssh_ptl_submit(ptl, packet);
++	ssh_packet_put(packet);
++}
++
++static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
++{
++	struct ssh_frame *frame;
++	struct ssam_span payload;
++	struct ssam_span aligned;
++	bool syn_found;
++	int status;
++
++	// error injection: modify data to simulate corrupt SYN bytes
++	ssh_ptl_rx_inject_invalid_syn(ptl, source);
++
++	// find SYN
++	syn_found = sshp_find_syn(source, &aligned);
++
++	if (unlikely(aligned.ptr - source->ptr > 0)) {
++		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
++
++		/*
++		 * Notes:
++		 * - This might send multiple NAKs in case the communication
++		 *   starts with an invalid SYN and is broken down into multiple
++		 *   pieces. This should generally be handled fine, we just
++		 *   might receive duplicate data in this case, which is
++		 *   detected when handling data frames.
++		 * - This path will also be executed on invalid CRCs: When an
++		 *   invalid CRC is encountered, the code below will skip data
++		 *   until directly after the SYN. This triggers a search for
++		 *   the next SYN, which is generally not placed directly after
++		 *   the last one.
++		 */
++		ssh_ptl_send_nak(ptl);
++	}
++
++	if (unlikely(!syn_found))
++		return aligned.ptr - source->ptr;
++
++	// error injection: modify data to simulate corruption
++	ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
++
++	// parse and validate frame
++	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
++				  SSH_PTL_RX_BUF_LEN);
++	if (status)	// invalid frame: skip to next syn
++		return aligned.ptr - source->ptr + sizeof(u16);
++	if (!frame)	// not enough data
++		return aligned.ptr - source->ptr;
++
++	trace_ssam_rx_frame_received(frame);
++
++	switch (frame->type) {
++	case SSH_FRAME_TYPE_ACK:
++		ssh_ptl_acknowledge(ptl, frame->seq);
++		break;
++
++	case SSH_FRAME_TYPE_NAK:
++		ssh_ptl_resubmit_pending(ptl);
++		break;
++
++	case SSH_FRAME_TYPE_DATA_SEQ:
++		ssh_ptl_send_ack(ptl, frame->seq);
++		/* fallthrough */
++
++	case SSH_FRAME_TYPE_DATA_NSQ:
++		ssh_ptl_rx_dataframe(ptl, frame, &payload);
++		break;
++
++	default:
++		ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n",
++			 frame->type);
++		break;
++	}
++
++	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
++}
++
++static int ssh_ptl_rx_threadfn(void *data)
++{
++	struct ssh_ptl *ptl = data;
++
++	while (true) {
++		struct ssam_span span;
++		size_t offs = 0;
++		size_t n;
++
++		wait_event_interruptible(ptl->rx.wq,
++					 !kfifo_is_empty(&ptl->rx.fifo)
++					 || kthread_should_stop());
++		if (kthread_should_stop())
++			break;
++
++		// copy from fifo to evaluation buffer
++		n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
++
++		ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
++		print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
++				     ptl->rx.buf.ptr + ptl->rx.buf.len - n,
++				     n, false);
++
++		// parse until we need more bytes or buffer is empty
++		while (offs < ptl->rx.buf.len) {
++			sshp_buf_span_from(&ptl->rx.buf, offs, &span);
++			n = ssh_ptl_rx_eval(ptl, &span);
++			if (n == 0)
++				break;	// need more bytes
++
++			offs += n;
++		}
++
++		// throw away the evaluated parts
++		sshp_buf_drop(&ptl->rx.buf, offs);
++	}
++
++	return 0;
++}
++
++static inline void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
++{
++	wake_up(&ptl->rx.wq);
++}
++
++static int ssh_ptl_rx_start(struct ssh_ptl *ptl)
++{
++	if (ptl->rx.thread)
++		return 0;
++
++	ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl, "surface-sh-rx");
++	if (IS_ERR(ptl->rx.thread))
++		return PTR_ERR(ptl->rx.thread);
++
++	return 0;
++}
++
++static int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
++{
++	int status = 0;
++
++	if (ptl->rx.thread) {
++		status = kthread_stop(ptl->rx.thread);
++		ptl->rx.thread = NULL;
++	}
++
++	return status;
++}
++
++static int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
++{
++	int used;
++
++	if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
++		return -ESHUTDOWN;
++
++	used = kfifo_in(&ptl->rx.fifo, buf, n);
++	if (used)
++		ssh_ptl_rx_wakeup(ptl);
++
++	return used;
++}
++
++
++struct ssh_flush_packet {
++	struct ssh_packet base;
++	struct completion completion;
++	int status;
++};
++
++static void ssh_ptl_flush_complete(struct ssh_packet *p, int status)
++{
++	struct ssh_flush_packet *packet;
++
++	packet = container_of(p, struct ssh_flush_packet, base);
++	packet->status = status;
++}
++
++static void ssh_ptl_flush_release(struct ssh_packet *p)
++{
++	struct ssh_flush_packet *packet;
++
++	packet = container_of(p, struct ssh_flush_packet, base);
++	complete_all(&packet->completion);
++}
++
++static const struct ssh_packet_ops ssh_flush_packet_ops = {
++	.complete = ssh_ptl_flush_complete,
++	.release = ssh_ptl_flush_release,
++};
++
++/**
++ * ssh_ptl_shutdown - shut down the packet transmission layer
++ * @ptl: packet transmission layer
++ *
++ * Shuts down the packet transmission layer, removing and canceling all queued
++ * and pending packets. Packets canceled by this operation will be completed
++ * with -ESHUTDOWN as status.
++ *
++ * As a result of this function, the transmission layer will be marked as shut
++ * down. Submission of packets after the transmission layer has been shut down
++ * will fail with -ESHUTDOWN.
++ */
++static void ssh_ptl_shutdown(struct ssh_ptl *ptl)
++{
++	LIST_HEAD(complete_q);
++	LIST_HEAD(complete_p);
++	struct ssh_packet *p, *n;
++	int status;
++
++	// ensure that no new packets (including ACK/NAK) can be submitted
++	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
++	smp_mb__after_atomic();
++
++	status = ssh_ptl_rx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop receiver thread\n");
++
++	status = ssh_ptl_tx_stop(ptl);
++	if (status)
++		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
++
++	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
++
++	/*
++	 * At this point, all threads have been stopped. This means that the
++	 * only references to packets from inside the system are in the queue
++	 * and pending set.
++	 *
++	 * Note: We still need locks here because someone could still be
++	 * cancelling packets.
++	 *
++	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
++	 * packet as locked and then remove it from the queue (or pending set
++	 * respectively). Marking the packet as locked avoids re-queueing
++	 * (which should already be prevented by having stopped the threads...)
++	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
++	 * new list via other threads (e.g. cancellation).
++	 *
++	 * Note 3: There may be overlap between complete_p and complete_q.
++	 * This is handled via test_and_set_bit on the "completed" flag
++	 * (also handles cancellation).
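++	 *
++	 * (Editorial summary of this function's sequence:
++	 *	1. set the SHUTDOWN bit so no new packets can be submitted,
++	 *	2. stop the rx/tx threads and cancel the timeout reaper,
++	 *	3. drain queue and pending set into local lists under their
++	 *	   respective locks,
++	 *	4. complete everything with -ESHUTDOWN and drop the
++	 *	   references.)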
++	 */
++
++	// mark queued packets as locked and move them to complete_q
++	spin_lock(&ptl->queue.lock);
++	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->queue_node);
++		list_add_tail(&p->queue_node, &complete_q);
++	}
++	spin_unlock(&ptl->queue.lock);
++
++	// mark pending packets as locked and move them to complete_p
++	spin_lock(&ptl->pending.lock);
++	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
++		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
++		smp_mb__before_atomic();
++		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
++
++		list_del(&p->pending_node);
++		list_add_tail(&p->pending_node, &complete_p);
++	}
++	atomic_set(&ptl->pending.count, 0);
++	spin_unlock(&ptl->pending.lock);
++
++	// complete and drop packets on complete_q
++	list_for_each_entry(p, &complete_q, queue_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	// complete and drop packets on complete_p
++	list_for_each_entry(p, &complete_p, pending_node) {
++		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
++			__ssh_ptl_complete(p, -ESHUTDOWN);
++
++		ssh_packet_put(p);
++	}
++
++	/*
++	 * At this point we have guaranteed that the system doesn't reference
++	 * any packets any more.
++	 */
++}
++
++static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
++{
++	return ptl->serdev ? &ptl->serdev->dev : NULL;
++}
++
++static int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
++			struct ssh_ptl_ops *ops)
++{
++	int i, status;
++
++	ptl->serdev = serdev;
++	ptl->state = 0;
++
++	spin_lock_init(&ptl->queue.lock);
++	INIT_LIST_HEAD(&ptl->queue.head);
++
++	spin_lock_init(&ptl->pending.lock);
++	INIT_LIST_HEAD(&ptl->pending.head);
++	atomic_set_release(&ptl->pending.count, 0);
++
++	ptl->tx.thread = NULL;
++	ptl->tx.thread_signal = false;
++	ptl->tx.packet = NULL;
++	ptl->tx.offset = 0;
++	init_waitqueue_head(&ptl->tx.thread_wq);
++	init_waitqueue_head(&ptl->tx.packet_wq);
++
++	ptl->rx.thread = NULL;
++	init_waitqueue_head(&ptl->rx.wq);
++
++	ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
++	ptl->rtx_timeout.expires = KTIME_MAX;
++	INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
++
++	ptl->ops = *ops;
++
++	// initialize list of recent/blocked SEQs with invalid sequence IDs
++	for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
++		ptl->rx.blocked.seqs[i] = 0xFFFF;
++	ptl->rx.blocked.offset = 0;
++
++	status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
++	if (status)
++		return status;
++
++	status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
++	if (status)
++		kfifo_free(&ptl->rx.fifo);
++
++	return status;
++}
++
++static void ssh_ptl_destroy(struct ssh_ptl *ptl)
++{
++	kfifo_free(&ptl->rx.fifo);
++	sshp_buf_free(&ptl->rx.buf);
++}
++
++
++/* -- Request transport layer (rtl).
---------------------------------------- */ ++ ++#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000) ++#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50)) ++ ++#define SSH_RTL_MAX_PENDING 3 ++ ++ ++enum ssh_rtl_state_flags { ++ SSH_RTL_SF_SHUTDOWN_BIT, ++}; ++ ++struct ssh_rtl_ops { ++ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd, ++ const struct ssam_span *data); ++}; ++ ++struct ssh_rtl { ++ struct ssh_ptl ptl; ++ unsigned long state; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ } queue; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ atomic_t count; ++ } pending; ++ ++ struct { ++ struct work_struct work; ++ } tx; ++ ++ struct { ++ ktime_t timeout; ++ ktime_t expires; ++ struct delayed_work reaper; ++ } rtx_timeout; ++ ++ struct ssh_rtl_ops ops; ++}; ++ ++ ++#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__) ++ ++#define to_ssh_rtl(ptr, member) \ ++ container_of(ptr, struct ssh_rtl, member) ++ ++#define to_ssh_request(ptr, member) \ ++ container_of(ptr, struct ssh_request, member) ++ ++static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst) ++{ ++ struct ssh_ptl *ptl = READ_ONCE(rqst->packet.ptl); ++ return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL; ++} ++ ++ ++/** ++ * ssh_rtl_should_drop_response - error injection hook to drop request responses ++ * ++ * Useful to cause request transmission timeouts in the driver by dropping the ++ * response to a request. 
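++ *
++ * (Editorial note: with this hook returning true, the matching request is
++ * left in the pending set and is eventually completed by the request
++ * timeout handling of this layer (see SSH_RTL_REQUEST_TIMEOUT and the
++ * rtx_timeout reaper) instead of by its response.)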
++ */ ++static noinline_if_inject bool ssh_rtl_should_drop_response(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE); ++ ++ ++static inline u16 ssh_request_get_rqid(struct ssh_request *rqst) ++{ ++ return get_unaligned_le16(rqst->packet.data.ptr ++ + SSH_MSGOFFSET_COMMAND(rqid)); ++} ++ ++static inline u32 ssh_request_get_rqid_safe(struct ssh_request *rqst) ++{ ++ if (!rqst->packet.data.ptr) ++ return -1; ++ ++ return ssh_request_get_rqid(rqst); ++} ++ ++ ++static void ssh_rtl_queue_remove(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ bool remove; ++ ++ spin_lock(&rtl->queue.lock); ++ ++ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state); ++ if (remove) ++ list_del(&rqst->node); ++ ++ spin_unlock(&rtl->queue.lock); ++ ++ if (remove) ++ ssh_request_put(rqst); ++} ++ ++static void ssh_rtl_pending_remove(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ bool remove; ++ ++ spin_lock(&rtl->pending.lock); ++ ++ remove = test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state); ++ if (remove) { ++ atomic_dec(&rtl->pending.count); ++ list_del(&rqst->node); ++ } ++ ++ spin_unlock(&rtl->pending.lock); ++ ++ if (remove) ++ ssh_request_put(rqst); ++} ++ ++ ++static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ ++ trace_ssam_request_complete(rqst, status); ++ ++ // rtl/ptl may not be set if we're cancelling before submitting ++ rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x," ++ " status: %d)\n", ssh_request_get_rqid_safe(rqst), status); ++ ++ if (status && status != -ECANCELED) ++ rtl_dbg_cond(rtl, "rtl: request error: %d\n", status); ++ ++ rqst->ops->complete(rqst, NULL, NULL, status); ++} ++ ++static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst, ++ const struct ssh_command *cmd, ++ const struct ssam_span *data) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ ++ trace_ssam_request_complete(rqst, 0); ++ ++ rtl_dbg(rtl, "rtl: completing request with response" ++ " (rqid: 0x%04x)\n", ssh_request_get_rqid(rqst)); ++ ++ rqst->ops->complete(rqst, cmd, data, 0); ++} ++ ++ ++static bool ssh_rtl_tx_can_process(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ ++ if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state)) ++ return !atomic_read(&rtl->pending.count); ++ ++ return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING; ++} ++ ++static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl) ++{ ++ struct ssh_request *rqst = ERR_PTR(-ENOENT); ++ struct ssh_request *p, *n; ++ ++ spin_lock(&rtl->queue.lock); ++ ++ // find first non-locked request and remove it ++ list_for_each_entry_safe(p, n, &rtl->queue.head, node) { ++ if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state))) ++ continue; ++ ++ if (!ssh_rtl_tx_can_process(p)) { ++ rqst = ERR_PTR(-EBUSY); ++ break; ++ } ++ ++ /* ++ * Remove from queue and mark as transmitting. Ensure that the ++ * state does not get zero via memory barrier. 
++		 */
++		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
++		smp_mb__before_atomic();
++		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
++
++		list_del(&p->node);
++
++		rqst = p;
++		break;
++	}
++
++	spin_unlock(&rtl->queue.lock);
++	return rqst;
++}
++
++static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
++{
++	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
++
++	spin_lock(&rtl->pending.lock);
++
++	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EINVAL;
++	}
++
++	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
++		spin_unlock(&rtl->pending.lock);
++		return -EALREADY;
++	}
++
++	atomic_inc(&rtl->pending.count);
++	ssh_request_get(rqst);
++	list_add_tail(&rqst->node, &rtl->pending.head);
++
++	spin_unlock(&rtl->pending.lock);
++	return 0;
++}
++
++static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
++{
++	struct ssh_request *rqst;
++	int status;
++
++	// get and prepare next request for transmit
++	rqst = ssh_rtl_tx_next(rtl);
++	if (IS_ERR(rqst))
++		return PTR_ERR(rqst);
++
++	// add to/mark as pending
++	status = ssh_rtl_tx_pending_push(rqst);
++	if (status) {
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	// submit packet
++	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
++	if (status == -ESHUTDOWN) {
++		/*
++		 * Packet has been refused due to the packet layer shutting
++		 * down. Complete it here.
++		 */
++		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
++		smp_mb__after_atomic();
++
++		ssh_rtl_pending_remove(rqst);
++		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
++
++		ssh_request_put(rqst);
++		return -ESHUTDOWN;
++
++	} else if (status) {
++		/*
++		 * If submitting the packet failed and the packet layer isn't
++		 * shutting down, the packet has either been submitted/queued
++		 * before (-EALREADY, which cannot happen as we have guaranteed
++		 * that requests cannot be re-submitted), or the packet was
++		 * marked as locked (-EINVAL). To mark the packet locked at this
++		 * stage, the request, and thus the packet itself, had to have
++		 * been canceled. Simply drop the reference. Cancellation itself
++		 * will remove it from the set of pending requests.
++		 */
++
++		WARN_ON(status != -EINVAL);
++
++		ssh_request_put(rqst);
++		return -EAGAIN;
++	}
++
++	ssh_request_put(rqst);
++	return 0;
++}
++
++static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
++{
++	bool empty;
++
++	spin_lock(&rtl->queue.lock);
++	empty = list_empty(&rtl->queue.head);
++	spin_unlock(&rtl->queue.lock);
++
++	return empty;
++}
++
++static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
++{
++	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
++		return false;
++
++	if (ssh_rtl_queue_empty(rtl))
++		return false;
++
++	return schedule_work(&rtl->tx.work);
++}
++
++static void ssh_rtl_tx_work_fn(struct work_struct *work)
++{
++	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
++	int i, status;
++
++	/*
++	 * Try to be nice and not block the workqueue: Run a maximum of 10
++	 * tries, then re-submit if necessary. This should not be necessary
++	 * for normal execution, but we guarantee it anyway.
++	 */
++	for (i = 0; i < 10; i++) {
++		status = ssh_rtl_tx_try_process_one(rtl);
++		if (status == -ENOENT || status == -EBUSY)
++			return;	// no more requests to process
++
++		if (status == -ESHUTDOWN) {
++			/*
++			 * Packet system shutting down. No new packets can be
++			 * transmitted. Return silently, the party initiating
++			 * the shutdown should handle the rest.
++ */ ++ return; ++ } ++ ++ WARN_ON(status != 0 && status != -EAGAIN); ++ } ++ ++ // out of tries, reschedule ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst) ++{ ++ trace_ssam_request_submit(rqst); ++ ++ /* ++ * Ensure that requests expecting a response are sequenced. If this ++ * invariant ever changes, see the comment in ssh_rtl_complete on what ++ * is required to be changed in the code. ++ */ ++ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state)) ++ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state)) ++ return -EINVAL; ++ ++ // try to set ptl and check if this request has already been submitted ++ if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl) != NULL) ++ return -EALREADY; ++ ++ spin_lock(&rtl->queue.lock); ++ ++ if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) { ++ spin_unlock(&rtl->queue.lock); ++ return -ESHUTDOWN; ++ } ++ ++ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) { ++ spin_unlock(&rtl->queue.lock); ++ return -EINVAL; ++ } ++ ++ ssh_request_get(rqst); ++ set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state); ++ list_add_tail(&rqst->node, &rtl->queue.head); ++ ++ spin_unlock(&rtl->queue.lock); ++ ++ ssh_rtl_tx_schedule(rtl); ++ return 0; ++} ++ ++ ++static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now, ++ ktime_t expires) ++{ ++ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now)); ++ ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION); ++ ktime_t old; ++ ++ // re-adjust / schedule reaper if it is above resolution delta ++ old = READ_ONCE(rtl->rtx_timeout.expires); ++ while (ktime_before(aexp, old)) ++ old = cmpxchg64(&rtl->rtx_timeout.expires, old, expires); ++ ++ // if we updated the reaper expiration, modify work timeout ++ if (old == expires) ++ mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta); ++} ++ ++static void ssh_rtl_timeout_start(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ ktime_t timestamp = ktime_get_coarse_boottime(); ++ ktime_t timeout = rtl->rtx_timeout.timeout; ++ ++ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) ++ return; ++ ++ WRITE_ONCE(rqst->timestamp, timestamp); ++ smp_mb__after_atomic(); ++ ++ ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout); ++} ++ ++ ++static void ssh_rtl_complete(struct ssh_rtl *rtl, ++ const struct ssh_command *command, ++ const struct ssam_span *command_data) ++{ ++ struct ssh_request *r = NULL; ++ struct ssh_request *p, *n; ++ u16 rqid = get_unaligned_le16(&command->rqid); ++ ++ trace_ssam_rx_response_received(command, command_data->len); ++ ++ /* ++ * Get request from pending based on request ID and mark it as response ++ * received and locked. ++ */ ++ spin_lock(&rtl->pending.lock); ++ list_for_each_entry_safe(p, n, &rtl->pending.head, node) { ++ // we generally expect requests to be processed in order ++ if (unlikely(ssh_request_get_rqid(p) != rqid)) ++ continue; ++ ++ // simulate response timeout ++ if (ssh_rtl_should_drop_response()) { ++ spin_unlock(&rtl->pending.lock); ++ ++ trace_ssam_ei_rx_drop_response(p); ++ rtl_info(rtl, "request error injection: " ++ "dropping response for request %p\n", ++ &p->packet); ++ return; ++ } ++ ++ /* ++ * Mark as "response received" and "locked" as we're going to ++ * complete it. Ensure that the state doesn't get zero by ++ * employing a memory barrier. 
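++ *
++ * Sketch of the resulting state transitions (performed below):
++ *
++ *	SSH_REQUEST_SF_LOCKED_BIT	0 -> 1
++ *	SSH_REQUEST_SF_RSPRCVD_BIT	0 -> 1
++ *	(barrier)
++ *	SSH_REQUEST_SF_PENDING_BIT	1 -> 0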
++ */ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state); ++ set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state); ++ ++ atomic_dec(&rtl->pending.count); ++ list_del(&p->node); ++ ++ r = p; ++ break; ++ } ++ spin_unlock(&rtl->pending.lock); ++ ++ if (!r) { ++ rtl_warn(rtl, "rtl: dropping unexpected command message" ++ " (rqid = 0x%04x)\n", rqid); ++ return; ++ } ++ ++ // if the request hasn't been completed yet, we will do this now ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) { ++ ssh_request_put(r); ++ ssh_rtl_tx_schedule(rtl); ++ return; ++ } ++ ++ /* ++ * Make sure the request has been transmitted. In case of a sequenced ++ * request, we are guaranteed that the completion callback will run on ++ * the receiver thread directly when the ACK for the packet has been ++ * received. Similarly, this function is guaranteed to run on the ++ * receiver thread. Thus we are guaranteed that if the packet has been ++ * successfully transmitted and received an ACK, the transmitted flag ++ * has been set and is visible here. ++ * ++ * We are currently not handling unsequenced packets here, as those ++ * should never expect a response as ensured in ssh_rtl_submit. If this ++ * ever changes, one would have to test for ++ * ++ * (r->state & (transmitting | transmitted)) ++ * ++ * on unsequenced packets to determine if they could have been ++ * transmitted. There are no synchronization guarantees as in the ++ * sequenced case, since, in this case, the callback function will not ++ * run on the same thread. Thus an exact determination is impossible. ++ */ ++ if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) { ++ rtl_err(rtl, "rtl: received response before ACK for request" ++ " (rqid = 0x%04x)\n", rqid); ++ ++ /* ++ * NB: Timeout has already been canceled, request already been ++ * removed from pending and marked as locked and completed. As ++ * we receive a "false" response, the packet might still be ++ * queued though. ++ */ ++ ssh_rtl_queue_remove(r); ++ ++ ssh_rtl_complete_with_status(r, -EREMOTEIO); ++ ssh_request_put(r); ++ ++ ssh_rtl_tx_schedule(rtl); ++ return; ++ } ++ ++ /* ++ * NB: Timeout has already been canceled, request already been ++ * removed from pending and marked as locked and completed. The request ++ * can also not be queued any more, as it has been marked as ++ * transmitting and later transmitted. Thus no need to remove it from ++ * anywhere. ++ */ ++ ++ ssh_rtl_complete_with_rsp(r, command, command_data); ++ ssh_request_put(r); ++ ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static bool ssh_rtl_cancel_nonpending(struct ssh_request *r) ++{ ++ struct ssh_rtl *rtl; ++ unsigned long state, fixed; ++ bool remove; ++ ++ /* ++ * Handle unsubmitted request: Try to mark the packet as locked, ++ * expecting the state to be zero (i.e. unsubmitted). Note that, if ++ * setting the state worked, we might still be adding the packet to the ++ * queue in a currently executing submit call. In that case, however, ++ * ptl reference must have been set previously, as locked is checked ++ * after setting ptl. Thus only if we successfully lock this request and ++ * ptl is NULL, we have successfully removed the request. ++ * Otherwise we need to try and grab it from the queue. ++ * ++ * Note that if the CMPXCHG fails, we are guaranteed that ptl has ++ * been set and is non-NULL, as states can only be nonzero after this ++ * has been set. 
Also note that we need to fetch the static (type) flags
++ * to ensure that they don't cause the cmpxchg to fail.
++ */
++ fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
++ state = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
++ if (!state && !READ_ONCE(r->packet.ptl)) {
++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++ return true;
++
++ ssh_rtl_complete_with_status(r, -ECANCELED);
++ return true;
++ }
++
++ rtl = ssh_request_rtl(r);
++ spin_lock(&rtl->queue.lock);
++
++ /*
++ * Note: 1) Requests cannot be re-submitted. 2) If a request is queued,
++ * it cannot be "transmitting"/"pending" yet. Thus, if we successfully
++ * remove the request here, we have removed all its occurrences in
++ * the system.
++ */
++
++ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++ if (!remove) {
++ spin_unlock(&rtl->queue.lock);
++ return false;
++ }
++
++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++ list_del(&r->node);
++
++ spin_unlock(&rtl->queue.lock);
++
++ ssh_request_put(r); // drop reference obtained from queue
++
++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++ return true;
++
++ ssh_rtl_complete_with_status(r, -ECANCELED);
++ return true;
++}
++
++static bool ssh_rtl_cancel_pending(struct ssh_request *r)
++{
++ // if the packet is already locked, it's going to be removed shortly
++ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
++ return true;
++
++ /*
++ * Now that we have locked the packet, we have guaranteed that it can't
++ * be added to the system any more. If rtl is zero, the locked
++ * check in ssh_rtl_submit has not been run and any submission,
++ * currently in progress or called later, won't add the packet. Thus we
++ * can directly complete it.
++ */
++ if (!ssh_request_rtl(r)) {
++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++ return true;
++
++ ssh_rtl_complete_with_status(r, -ECANCELED);
++ return true;
++ }
++
++ /*
++ * Try to cancel the packet. If the packet has not been completed yet,
++ * this will subsequently (and synchronously) call the completion
++ * callback of the packet, which will complete the request.
++ */
++ ssh_ptl_cancel(&r->packet);
++
++ /*
++ * If the packet has been completed with success, i.e. has not been
++ * canceled by the above call, the request may not have been completed
++ * yet (may be waiting for a response). Check if we need to do this
++ * here. 
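++ *
++ * In summary, the cases handled by this function are (sketch):
++ *
++ *	request never submitted (rtl == NULL)  complete with -ECANCELED
++ *	packet canceled by ssh_ptl_cancel      callback completes request
++ *	packet completed, response pending     cleanup below completes it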
++ */ ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return true; ++ ++ ssh_rtl_queue_remove(r); ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, -ECANCELED); ++ ++ return true; ++} ++ ++static bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending) ++{ ++ struct ssh_rtl *rtl; ++ bool canceled; ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state)) ++ return true; ++ ++ trace_ssam_request_cancel(rqst); ++ ++ if (pending) ++ canceled = ssh_rtl_cancel_pending(rqst); ++ else ++ canceled = ssh_rtl_cancel_nonpending(rqst); ++ ++ // note: rtl may be NULL if request has not been submitted yet ++ rtl = ssh_request_rtl(rqst); ++ if (canceled && rtl) ++ ssh_rtl_tx_schedule(rtl); ++ ++ return canceled; ++} ++ ++ ++static void ssh_rtl_packet_callback(struct ssh_packet *p, int status) ++{ ++ struct ssh_request *r = to_ssh_request(p, packet); ++ ++ if (unlikely(status)) { ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return; ++ ++ /* ++ * The packet may get cancelled even though it has not been ++ * submitted yet. The request may still be queued. Check the ++ * queue and remove it if necessary. As the timeout would have ++ * been started in this function on success, there's no need to ++ * cancel it here. ++ */ ++ ssh_rtl_queue_remove(r); ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, status); ++ ++ ssh_rtl_tx_schedule(ssh_request_rtl(r)); ++ return; ++ } ++ ++ /* ++ * Mark as transmitted, ensure that state doesn't get zero by inserting ++ * a memory barrier. ++ */ ++ set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state); ++ ++ // if we expect a response, we just need to start the timeout ++ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) { ++ ssh_rtl_timeout_start(r); ++ return; ++ } ++ ++ /* ++ * If we don't expect a response, lock, remove, and complete the ++ * request. Note that, at this point, the request is guaranteed to have ++ * left the queue and no timeout has been started. Thus we only need to ++ * remove it from pending. If the request has already been completed (it ++ * may have been canceled) return. ++ */ ++ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return; ++ ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, 0); ++ ++ ssh_rtl_tx_schedule(ssh_request_rtl(r)); ++} ++ ++ ++static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeo) ++{ ++ ktime_t timestamp = READ_ONCE(r->timestamp); ++ ++ if (timestamp != KTIME_MAX) ++ return ktime_add(timestamp, timeo); ++ else ++ return KTIME_MAX; ++} ++ ++static void ssh_rtl_timeout_reap(struct work_struct *work) ++{ ++ struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work); ++ struct ssh_request *r, *n; ++ LIST_HEAD(claimed); ++ ktime_t now = ktime_get_coarse_boottime(); ++ ktime_t timeout = rtl->rtx_timeout.timeout; ++ ktime_t next = KTIME_MAX; ++ ++ trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count)); ++ ++ /* ++ * Mark reaper as "not pending". This is done before checking any ++ * requests to avoid lost-update type problems. 
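++ *
++ * Illustration of the lost update avoided by this ordering: if
++ * expires were reset to KTIME_MAX only after scanning, a concurrent
++ * ssh_rtl_timeout_reaper_mod() could compare against the old (earlier)
++ * value, decide not to re-arm the work item, and then have its update
++ * effectively discarded by the late KTIME_MAX store -- leaving a
++ * pending request without a scheduled reaper run.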
++ */ ++ WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX); ++ smp_mb__after_atomic(); ++ ++ spin_lock(&rtl->pending.lock); ++ list_for_each_entry_safe(r, n, &rtl->pending.head, node) { ++ ktime_t expires = ssh_request_get_expiration(r, timeout); ++ ++ /* ++ * Check if the timeout hasn't expired yet. Find out next ++ * expiration date to be handled after this run. ++ */ ++ if (ktime_after(expires, now)) { ++ next = ktime_before(expires, next) ? expires : next; ++ continue; ++ } ++ ++ // avoid further transitions if locked ++ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state)) ++ continue; ++ ++ /* ++ * We have now marked the packet as locked. Thus it cannot be ++ * added to the pending or queued lists again after we've ++ * removed it here. We can therefore re-use the node of this ++ * packet temporarily. ++ */ ++ ++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state); ++ ++ atomic_dec(&rtl->pending.count); ++ list_del(&r->node); ++ ++ list_add_tail(&r->node, &claimed); ++ } ++ spin_unlock(&rtl->pending.lock); ++ ++ // cancel and complete the request ++ list_for_each_entry_safe(r, n, &claimed, node) { ++ trace_ssam_request_timeout(r); ++ ++ /* ++ * At this point we've removed the packet from pending. This ++ * means that we've obtained the last (only) reference of the ++ * system to it. Thus we can just complete it. ++ */ ++ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ ssh_rtl_complete_with_status(r, -ETIMEDOUT); ++ ++ // drop the reference we've obtained by removing it from pending ++ list_del(&r->node); ++ ssh_request_put(r); ++ } ++ ++ // ensure that reaper doesn't run again immediately ++ next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION)); ++ if (next != KTIME_MAX) ++ ssh_rtl_timeout_reaper_mod(rtl, now, next); ++ ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd, ++ const struct ssam_span *data) ++{ ++ trace_ssam_rx_event_received(cmd, data->len); ++ ++ rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n", ++ get_unaligned_le16(&cmd->rqid)); ++ ++ rtl->ops.handle_event(rtl, cmd, data); ++} ++ ++static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data) ++{ ++ struct ssh_rtl *rtl = to_ssh_rtl(p, ptl); ++ struct device *dev = &p->serdev->dev; ++ struct ssh_command *command; ++ struct ssam_span command_data; ++ ++ if (sshp_parse_command(dev, data, &command, &command_data)) ++ return; ++ ++ if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid))) ++ ssh_rtl_rx_event(rtl, command, &command_data); ++ else ++ ssh_rtl_complete(rtl, command, &command_data); ++} ++ ++static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data) ++{ ++ switch (data->ptr[0]) { ++ case SSH_PLD_TYPE_CMD: ++ ssh_rtl_rx_command(p, data); ++ break; ++ ++ default: ++ ptl_err(p, "rtl: rx: unknown frame payload type" ++ " (type: 0x%02x)\n", data->ptr[0]); ++ break; ++ } ++} ++ ++ ++static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl) ++{ ++ return ssh_ptl_get_device(&rtl->ptl); ++} ++ ++static inline bool ssh_rtl_tx_flush(struct ssh_rtl *rtl) ++{ ++ return flush_work(&rtl->tx.work); ++} ++ ++static inline int ssh_rtl_tx_start(struct ssh_rtl *rtl) ++{ ++ int status; ++ bool sched; ++ ++ status = ssh_ptl_tx_start(&rtl->ptl); ++ if (status) ++ return status; ++ ++ /* ++ * If the packet layer has been shut down and restarted without shutting ++ * down the request layer, there may still be requests queued and not ++ * handled. 
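++ *
++ * Illustrative sequence leading to this situation:
++ *
++ *	ssh_rtl_submit()	request queued
++ *	ssh_ptl_shutdown()	packet layer stops, queue keeps entries
++ *	ssh_rtl_tx_start()	restart; tx work must be rescheduled below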
++ */ ++ spin_lock(&rtl->queue.lock); ++ sched = !list_empty(&rtl->queue.head); ++ spin_unlock(&rtl->queue.lock); ++ ++ if (sched) ++ ssh_rtl_tx_schedule(rtl); ++ ++ return 0; ++} ++ ++static inline int ssh_rtl_rx_start(struct ssh_rtl *rtl) ++{ ++ return ssh_ptl_rx_start(&rtl->ptl); ++} ++ ++static int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev, ++ const struct ssh_rtl_ops *ops) ++{ ++ struct ssh_ptl_ops ptl_ops; ++ int status; ++ ++ ptl_ops.data_received = ssh_rtl_rx_data; ++ ++ status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops); ++ if (status) ++ return status; ++ ++ spin_lock_init(&rtl->queue.lock); ++ INIT_LIST_HEAD(&rtl->queue.head); ++ ++ spin_lock_init(&rtl->pending.lock); ++ INIT_LIST_HEAD(&rtl->pending.head); ++ atomic_set_release(&rtl->pending.count, 0); ++ ++ INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn); ++ ++ rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT; ++ rtl->rtx_timeout.expires = KTIME_MAX; ++ INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap); ++ ++ rtl->ops = *ops; ++ ++ return 0; ++} ++ ++static void ssh_rtl_destroy(struct ssh_rtl *rtl) ++{ ++ ssh_ptl_destroy(&rtl->ptl); ++} ++ ++ ++static void ssh_rtl_packet_release(struct ssh_packet *p) ++{ ++ struct ssh_request *rqst = to_ssh_request(p, packet); ++ rqst->ops->release(rqst); ++} ++ ++static const struct ssh_packet_ops ssh_rtl_packet_ops = { ++ .complete = ssh_rtl_packet_callback, ++ .release = ssh_rtl_packet_release, ++}; ++ ++static void ssh_request_init(struct ssh_request *rqst, ++ enum ssam_request_flags flags, ++ const struct ssh_request_ops *ops) ++{ ++ struct ssh_packet_args packet_args; ++ ++ packet_args.type = BIT(SSH_PACKET_TY_BLOCKING_BIT); ++ if (!(flags & SSAM_REQUEST_UNSEQUENCED)) ++ packet_args.type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT); ++ ++ packet_args.priority = SSH_PACKET_PRIORITY(DATA, 0); ++ packet_args.ops = &ssh_rtl_packet_ops; ++ ++ ssh_packet_init(&rqst->packet, &packet_args); ++ INIT_LIST_HEAD(&rqst->node); ++ ++ rqst->state = 0; ++ if (flags & SSAM_REQUEST_HAS_RESPONSE) ++ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT); ++ ++ rqst->timestamp = KTIME_MAX; ++ rqst->ops = ops; ++} ++ ++ ++struct ssh_flush_request { ++ struct ssh_request base; ++ struct completion completion; ++ int status; ++}; ++ ++static void ssh_rtl_flush_request_complete(struct ssh_request *r, ++ const struct ssh_command *cmd, ++ const struct ssam_span *data, ++ int status) ++{ ++ struct ssh_flush_request *rqst; ++ ++ rqst = container_of(r, struct ssh_flush_request, base); ++ rqst->status = status; ++} ++ ++static void ssh_rtl_flush_request_release(struct ssh_request *r) ++{ ++ struct ssh_flush_request *rqst; ++ ++ rqst = container_of(r, struct ssh_flush_request, base); ++ complete_all(&rqst->completion); ++} ++ ++static const struct ssh_request_ops ssh_rtl_flush_request_ops = { ++ .complete = ssh_rtl_flush_request_complete, ++ .release = ssh_rtl_flush_request_release, ++}; ++ ++/** ++ * ssh_rtl_flush - flush the request transmission layer ++ * @rtl: request transmission layer ++ * @timeout: timeout for the flush operation in jiffies ++ * ++ * Queue a special flush request and wait for its completion. This request ++ * will be completed after all other currently queued and pending requests ++ * have been completed. Instead of a normal data packet, this request submits ++ * a special flush packet, meaning that upon completion, also the underlying ++ * packet transmission layer has been flushed. 
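++ *
++ * Usage sketch (illustrative; the controller shutdown path in this
++ * file uses a five second timeout like this):
++ *
++ *	status = ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
++ *	if (status)
++ *		rtl_err(rtl, "flush failed: %d\n", status);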
++ *
++ * Flushing the request layer guarantees that all previously submitted
++ * requests have been fully completed before this call returns. Additionally,
++ * flushing blocks execution of all later submitted requests until the flush
++ * has been completed.
++ *
++ * If the caller ensures that no new requests are submitted after a call to
++ * this function, the request transmission layer is guaranteed to have no
++ * remaining requests when this call returns. The same guarantee does not hold
++ * for the packet layer, on which control packets may still be queued after
++ * this call. See the documentation of ssh_ptl_flush for more details on
++ * packet layer flushing.
++ *
++ * Return: Zero on success, -ETIMEDOUT if the flush timed out and has been
++ * canceled as a result of the timeout, or -ESHUTDOWN if the packet and/or
++ * request transmission layer has been shut down before this call. May also
++ * return -EINTR if the underlying packet transmission has been interrupted.
++ */
++static int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
++{
++ const unsigned init_flags = SSAM_REQUEST_UNSEQUENCED;
++ struct ssh_flush_request rqst;
++ int status;
++
++ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
++ rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
++ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
++ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
++
++ init_completion(&rqst.completion);
++
++ status = ssh_rtl_submit(rtl, &rqst.base);
++ if (status)
++ return status;
++
++ ssh_request_put(&rqst.base);
++
++ if (wait_for_completion_timeout(&rqst.completion, timeout))
++ return 0;
++
++ ssh_rtl_cancel(&rqst.base, true);
++ wait_for_completion(&rqst.completion);
++
++ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED
++ && rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
++
++ return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
++}
++
++
++static void ssh_rtl_shutdown(struct ssh_rtl *rtl)
++{
++ struct ssh_request *r, *n;
++ LIST_HEAD(claimed);
++ int pending;
++
++ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
++ smp_mb__after_atomic();
++
++ // remove requests from queue
++ spin_lock(&rtl->queue.lock);
++ list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++ smp_mb__before_atomic();
++ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
++
++ list_del(&r->node);
++ list_add_tail(&r->node, &claimed);
++ }
++ spin_unlock(&rtl->queue.lock);
++
++ /*
++ * We have now guaranteed that the queue is empty and no more new
++ * requests can be submitted (i.e. it will stay empty). This means that
++ * calling ssh_rtl_tx_schedule will not schedule tx.work any more. So we
++ * can simply call cancel_work_sync on tx.work here and when that
++ * returns, we've locked it down. This also means that after this call,
++ * we don't submit any more packets to the underlying packet layer, so
++ * we can also shut that down.
++ */
++
++ cancel_work_sync(&rtl->tx.work);
++ ssh_ptl_shutdown(&rtl->ptl);
++ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
++
++ /*
++ * Shutting down the packet layer should also have canceled all
++ * requests. Thus the pending set should be empty. Attempt to handle
++ * this gracefully anyway, even though this should be dead code. 
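++ *
++ * For reference, the teardown ordering established above is:
++ *
++ *	cancel_work_sync(&rtl->tx.work)      no new submissions to ptl
++ *	ssh_ptl_shutdown(&rtl->ptl)          cancels outstanding packets
++ *	cancel_delayed_work_sync(...reaper)  no more timeout processing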
++ */
++
++ pending = atomic_read(&rtl->pending.count);
++ if (WARN_ON(pending)) {
++ spin_lock(&rtl->pending.lock);
++ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
++ smp_mb__before_atomic();
++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
++
++ list_del(&r->node);
++ list_add_tail(&r->node, &claimed);
++ }
++ spin_unlock(&rtl->pending.lock);
++ }
++
++ // finally cancel and complete requests
++ list_for_each_entry_safe(r, n, &claimed, node) {
++ // test_and_set because we still might compete with cancellation
++ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
++ ssh_rtl_complete_with_status(r, -ESHUTDOWN);
++
++ // drop the reference we've obtained by removing it from list
++ list_del(&r->node);
++ ssh_request_put(r);
++ }
++}
++
++
++/* -- Event notifier/callbacks. --------------------------------------------- */
++/*
++ * The notifier system is based on linux/notifier.h, specifically the SRCU
++ * implementation. The difference is that some bits of the notifier call
++ * return value can be tracked across multiple calls. This is done so that
++ * handling of events can be tracked and a warning can be issued in case an
++ * event goes unhandled. The idea of that warning is that it should help
++ * discover and identify new/currently unimplemented features.
++ */
++
++struct ssam_nf_head {
++ struct srcu_struct srcu;
++ struct ssam_notifier_block __rcu *head;
++};
++
++
++int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
++{
++ struct ssam_notifier_block *nb, *next_nb;
++ int ret = 0, idx;
++
++ idx = srcu_read_lock(&nh->srcu);
++
++ nb = rcu_dereference_raw(nh->head);
++ while (nb) {
++ next_nb = rcu_dereference_raw(nb->next);
++
++ ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nb, event);
++ if (ret & SSAM_NOTIF_STOP)
++ break;
++
++ nb = next_nb;
++ }
++
++ srcu_read_unlock(&nh->srcu, idx);
++ return ret;
++}
++
++/*
++ * Note: This function must be synchronized by the caller with respect to other
++ * insert and/or remove calls.
++ */
++int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++ struct ssam_notifier_block **link = &nh->head;
++
++ while ((*link) != NULL) {
++ if (unlikely((*link) == nb)) {
++ WARN(1, "double register detected");
++ return -EINVAL;
++ }
++
++ if (nb->priority > (*link)->priority)
++ break;
++
++ link = &((*link)->next);
++ }
++
++ nb->next = *link;
++ rcu_assign_pointer(*link, nb);
++
++ return 0;
++}
++
++/*
++ * Note: This function must be synchronized by the caller with respect to other
++ * insert and/or remove calls. On success, the caller _must_ ensure SRCU
++ * synchronization by calling `synchronize_srcu(&nh->srcu)` after leaving the
++ * critical section, to ensure that the removed notifier block is not in use any
++ * more.
++ */
++int __ssam_nfblk_remove(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
++{
++ struct ssam_notifier_block **link = &nh->head;
++
++ while ((*link) != NULL) {
++ if ((*link) == nb) {
++ rcu_assign_pointer(*link, nb->next);
++ return 0;
++ }
++
++ link = &((*link)->next);
++ }
++
++ return -ENOENT;
++}
++
++static int ssam_nf_head_init(struct ssam_nf_head *nh)
++{
++ int status;
++
++ status = init_srcu_struct(&nh->srcu);
++ if (status)
++ return status;
++
++ nh->head = NULL;
++ return 0;
++}
++
++static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
++{
++ cleanup_srcu_struct(&nh->srcu);
++}
++
++
++/* -- Event/notification registry. 
------------------------------------------ */ ++ ++struct ssam_nf_refcount_key { ++ struct ssam_event_registry reg; ++ struct ssam_event_id id; ++}; ++ ++struct ssam_nf_refcount_entry { ++ struct rb_node node; ++ struct ssam_nf_refcount_key key; ++ int refcount; ++}; ++ ++struct ssam_nf { ++ struct mutex lock; ++ struct rb_root refcount; ++ struct ssam_nf_head head[SSH_NUM_EVENTS]; ++}; ++ ++ ++static int ssam_nf_refcount_inc(struct ssam_nf *nf, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id) ++{ ++ struct ssam_nf_refcount_entry *entry; ++ struct ssam_nf_refcount_key key; ++ struct rb_node **link = &nf->refcount.rb_node; ++ struct rb_node *parent = NULL; ++ int cmp; ++ ++ key.reg = reg; ++ key.id = id; ++ ++ while (*link) { ++ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node); ++ parent = *link; ++ ++ cmp = memcmp(&key, &entry->key, sizeof(key)); ++ if (cmp < 0) { ++ link = &(*link)->rb_left; ++ } else if (cmp > 0) { ++ link = &(*link)->rb_right; ++ } else if (entry->refcount < INT_MAX) { ++ return ++entry->refcount; ++ } else { ++ return -ENOSPC; ++ } ++ } ++ ++ entry = kzalloc(sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->key = key; ++ entry->refcount = 1; ++ ++ rb_link_node(&entry->node, parent, link); ++ rb_insert_color(&entry->node, &nf->refcount); ++ ++ return entry->refcount; ++} ++ ++static int ssam_nf_refcount_dec(struct ssam_nf *nf, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id) ++{ ++ struct ssam_nf_refcount_entry *entry; ++ struct ssam_nf_refcount_key key; ++ struct rb_node *node = nf->refcount.rb_node; ++ int cmp, rc; ++ ++ key.reg = reg; ++ key.id = id; ++ ++ while (node) { ++ entry = rb_entry(node, struct ssam_nf_refcount_entry, node); ++ ++ cmp = memcmp(&key, &entry->key, sizeof(key)); ++ if (cmp < 0) { ++ node = node->rb_left; ++ } else if (cmp > 0) { ++ node = node->rb_right; ++ } else { ++ rc = --entry->refcount; ++ ++ if (rc == 0) { ++ rb_erase(&entry->node, &nf->refcount); ++ kfree(entry); ++ } ++ ++ return rc; ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++static bool ssam_nf_refcount_empty(struct ssam_nf *nf) ++{ ++ return RB_EMPTY_ROOT(&nf->refcount); ++} ++ ++static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid, ++ struct ssam_event *event) ++{ ++ struct ssam_nf_head *nf_head; ++ int status, nf_ret; ++ ++ if (!ssh_rqid_is_event(rqid)) { ++ dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid); ++ return; ++ } ++ ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ nf_ret = ssam_nfblk_call_chain(nf_head, event); ++ status = ssam_notifier_to_errno(nf_ret); ++ ++ if (status < 0) { ++ dev_err(dev, "event: error handling event: %d " ++ "(tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n", ++ status, event->target_category, event->command_id, ++ event->instance_id, event->channel); ++ } ++ ++ if (!(nf_ret & SSAM_NOTIF_HANDLED)) { ++ dev_warn(dev, "event: unhandled event (rqid: 0x%02x, " ++ "tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n", ++ rqid, event->target_category, event->command_id, ++ event->instance_id, event->channel); ++ } ++} ++ ++static int ssam_nf_init(struct ssam_nf *nf) ++{ ++ int i, status; ++ ++ for (i = 0; i < SSH_NUM_EVENTS; i++) { ++ status = ssam_nf_head_init(&nf->head[i]); ++ if (status) ++ break; ++ } ++ ++ if (status) { ++ for (i = i - 1; i >= 0; i--) ++ ssam_nf_head_destroy(&nf->head[i]); ++ ++ return status; ++ } ++ ++ mutex_init(&nf->lock); ++ return 0; ++} ++ ++static void ssam_nf_destroy(struct ssam_nf *nf) ++{ ++ int i; ++ ++ for (i = 0; i < 
SSH_NUM_EVENTS; i++)
++ ssam_nf_head_destroy(&nf->head[i]);
++
++ mutex_destroy(&nf->lock);
++}
++
++
++/* -- Event/async request completion system. -------------------------------- */
++
++#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
++
++
++struct ssam_cplt;
++struct ssam_event_item;
++
++struct ssam_event_item_ops {
++ void (*free)(struct ssam_event_item *);
++};
++
++struct ssam_event_item {
++ struct list_head node;
++ u16 rqid;
++
++ struct ssam_event_item_ops ops;
++ struct ssam_event event; // must be last
++};
++
++struct ssam_event_queue {
++ struct ssam_cplt *cplt;
++
++ spinlock_t lock;
++ struct list_head head;
++ struct work_struct work;
++};
++
++struct ssam_event_channel {
++ struct ssam_event_queue queue[SSH_NUM_EVENTS];
++};
++
++struct ssam_cplt {
++ struct device *dev;
++ struct workqueue_struct *wq;
++
++ struct {
++ struct ssam_event_channel channel[SSH_NUM_CHANNELS];
++ struct ssam_nf notif;
++ } event;
++};
++
++
++/**
++ * Maximum payload length for cached `ssam_event_item`s.
++ *
++ * This length has been chosen to accommodate standard touchpad and keyboard
++ * input events. Events with larger payloads will be allocated separately.
++ */
++#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
++
++static struct kmem_cache *ssam_event_item_cache;
++
++static int ssam_event_item_cache_init(void)
++{
++ const unsigned int size = sizeof(struct ssam_event_item)
++ + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
++ const unsigned int align = __alignof__(struct ssam_event_item);
++ struct kmem_cache *cache;
++
++ cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
++ if (!cache)
++ return -ENOMEM;
++
++ ssam_event_item_cache = cache;
++ return 0;
++}
++
++static void ssam_event_item_cache_destroy(void)
++{
++ kmem_cache_destroy(ssam_event_item_cache);
++ ssam_event_item_cache = NULL;
++}
++
++static void __ssam_event_item_free_cached(struct ssam_event_item *item)
++{
++ kmem_cache_free(ssam_event_item_cache, item);
++}
++
++static void __ssam_event_item_free_generic(struct ssam_event_item *item)
++{
++ kfree(item);
++}
++
++static inline void ssam_event_item_free(struct ssam_event_item *item)
++{
++ trace_ssam_event_item_free(item);
++ item->ops.free(item);
++}
++
++static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
++{
++ struct ssam_event_item *item;
++
++ if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
++ // honor the caller-provided allocation flags
++ item = kmem_cache_alloc(ssam_event_item_cache, flags);
++ if (!item)
++ return NULL;
++
++ item->ops.free = __ssam_event_item_free_cached;
++ } else {
++ const size_t n = sizeof(struct ssam_event_item) + len;
++ item = kzalloc(n, flags);
++ if (!item)
++ return NULL;
++
++ item->ops.free = __ssam_event_item_free_generic;
++ }
++
++ item->event.length = len;
++
++ trace_ssam_event_item_alloc(item, len);
++ return item;
++}
++
++
++static void ssam_event_queue_push(struct ssam_event_queue *q,
++ struct ssam_event_item *item)
++{
++ spin_lock(&q->lock);
++ list_add_tail(&item->node, &q->head);
++ spin_unlock(&q->lock);
++}
++
++static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
++{
++ struct ssam_event_item *item;
++
++ spin_lock(&q->lock);
++ item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
++ if (item)
++ list_del(&item->node);
++ spin_unlock(&q->lock);
++
++ return item;
++}
++
++static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
++{
++ bool empty;
++
++ spin_lock(&q->lock);
++ empty = list_empty(&q->head);
++ spin_unlock(&q->lock);
++
++ return empty;
++}
++
++static 
struct ssam_event_queue *ssam_cplt_get_event_queue( ++ struct ssam_cplt *cplt, u8 channel, u16 rqid) ++{ ++ u16 event = ssh_rqid_to_event(rqid); ++ u16 chidx = ssh_channel_to_index(channel); ++ ++ if (!ssh_rqid_is_event(rqid)) { ++ dev_err(cplt->dev, "event: unsupported rqid: 0x%04x\n", rqid); ++ return NULL; ++ } ++ ++ if (!ssh_channel_is_valid(channel)) { ++ dev_warn(cplt->dev, "event: unsupported channel: %u\n", ++ channel); ++ chidx = 0; ++ } ++ ++ return &cplt->event.channel[chidx].queue[event]; ++} ++ ++static inline bool ssam_cplt_submit(struct ssam_cplt *cplt, ++ struct work_struct *work) ++{ ++ return queue_work(cplt->wq, work); ++} ++ ++static int ssam_cplt_submit_event(struct ssam_cplt *cplt, ++ struct ssam_event_item *item) ++{ ++ struct ssam_event_queue *evq; ++ ++ evq = ssam_cplt_get_event_queue(cplt, item->event.channel, item->rqid); ++ if (!evq) ++ return -EINVAL; ++ ++ ssam_event_queue_push(evq, item); ++ ssam_cplt_submit(cplt, &evq->work); ++ return 0; ++} ++ ++static void ssam_cplt_flush(struct ssam_cplt *cplt) ++{ ++ flush_workqueue(cplt->wq); ++} ++ ++static void ssam_event_queue_work_fn(struct work_struct *work) ++{ ++ struct ssam_event_queue *queue; ++ struct ssam_event_item *item; ++ struct ssam_nf *nf; ++ struct device *dev; ++ int i; ++ ++ queue = container_of(work, struct ssam_event_queue, work); ++ nf = &queue->cplt->event.notif; ++ dev = queue->cplt->dev; ++ ++ for (i = 0; i < 10; i++) { ++ item = ssam_event_queue_pop(queue); ++ if (item == NULL) ++ return; ++ ++ ssam_nf_call(nf, dev, item->rqid, &item->event); ++ ssam_event_item_free(item); ++ } ++ ++ if (!ssam_event_queue_is_empty(queue)) ++ ssam_cplt_submit(queue->cplt, &queue->work); ++} ++ ++static void ssam_event_queue_init(struct ssam_cplt *cplt, ++ struct ssam_event_queue *evq) ++{ ++ evq->cplt = cplt; ++ spin_lock_init(&evq->lock); ++ INIT_LIST_HEAD(&evq->head); ++ INIT_WORK(&evq->work, ssam_event_queue_work_fn); ++} ++ ++static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev) ++{ ++ struct ssam_event_channel *channel; ++ int status, c, i; ++ ++ cplt->dev = dev; ++ ++ cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); ++ if (!cplt->wq) ++ return -ENOMEM; ++ ++ for (c = 0; c < ARRAY_SIZE(cplt->event.channel); c++) { ++ channel = &cplt->event.channel[c]; ++ ++ for (i = 0; i < ARRAY_SIZE(channel->queue); i++) ++ ssam_event_queue_init(cplt, &channel->queue[i]); ++ } ++ ++ status = ssam_nf_init(&cplt->event.notif); ++ if (status) ++ destroy_workqueue(cplt->wq); ++ ++ return status; ++} ++ ++static void ssam_cplt_destroy(struct ssam_cplt *cplt) ++{ ++ /* ++ * Note: destroy_workqueue ensures that all currently queued work will ++ * be fully completed and the workqueue drained. This means that this ++ * call will inherently also free any queued ssam_event_items, thus we ++ * don't have to take care of that here explicitly. ++ */ ++ destroy_workqueue(cplt->wq); ++ ssam_nf_destroy(&cplt->event.notif); ++} ++ ++ ++/* -- Main SSAM device structures. 
------------------------------------------ */ ++ ++enum ssam_controller_state { ++ SSAM_CONTROLLER_UNINITIALIZED, ++ SSAM_CONTROLLER_INITIALIZED, ++ SSAM_CONTROLLER_STARTED, ++ SSAM_CONTROLLER_STOPPED, ++ SSAM_CONTROLLER_SUSPENDED, ++}; ++ ++struct ssam_device_caps { ++ u32 notif_display:1; ++ u32 notif_d0exit:1; ++}; ++ ++struct ssam_controller { ++ enum ssam_controller_state state; ++ ++ struct ssh_rtl rtl; ++ struct ssam_cplt cplt; ++ ++ struct { ++ struct ssh_seq_counter seq; ++ struct ssh_rqid_counter rqid; ++ } counter; ++ ++ struct { ++ int num; ++ bool wakeup_enabled; ++ } irq; ++ ++ struct ssam_device_caps caps; ++}; ++ ++ ++#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__) ++#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__) ++#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__) ++#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__) ++ ++#define to_ssam_controller(ptr, member) \ ++ container_of(ptr, struct ssam_controller, member) ++ ++struct device *ssam_controller_device(struct ssam_controller *c) ++{ ++ return ssh_rtl_get_device(&c->rtl); ++} ++EXPORT_SYMBOL_GPL(ssam_controller_device); ++ ++ ++static void ssam_handle_event(struct ssh_rtl *rtl, ++ const struct ssh_command *cmd, ++ const struct ssam_span *data) ++{ ++ struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl); ++ struct ssam_event_item *item; ++ ++ item = ssam_event_item_alloc(data->len, GFP_KERNEL); ++ if (!item) ++ return; ++ ++ item->rqid = get_unaligned_le16(&cmd->rqid); ++ item->event.target_category = cmd->tc; ++ item->event.command_id = cmd->cid; ++ item->event.instance_id = cmd->iid; ++ item->event.channel = cmd->chn_in; ++ memcpy(&item->event.data[0], data->ptr, data->len); ++ ++ ssam_cplt_submit_event(&ctrl->cplt, item); ++} ++ ++static const struct ssh_rtl_ops ssam_rtl_ops = { ++ .handle_event = ssam_handle_event, ++}; ++ ++ ++static bool ssam_notifier_empty(struct ssam_controller *ctrl); ++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl); ++ ++ ++#define SSAM_SSH_DSM_REVISION 0 ++#define SSAM_SSH_DSM_NOTIF_D0 8 ++static const guid_t SSAM_SSH_DSM_UUID = GUID_INIT(0xd5e383e1, 0xd892, 0x4a76, ++ 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5); ++ ++static int ssam_device_caps_load_from_acpi(acpi_handle handle, ++ struct ssam_device_caps *caps) ++{ ++ union acpi_object *obj; ++ u64 funcs = 0; ++ int i; ++ ++ // set defaults ++ caps->notif_display = true; ++ caps->notif_d0exit = false; ++ ++ if (!acpi_has_method(handle, "_DSM")) ++ return 0; ++ ++ // get function availability bitfield ++ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID, 0, 0, NULL, ++ ACPI_TYPE_BUFFER); ++ if (!obj) ++ return -EFAULT; ++ ++ for (i = 0; i < obj->buffer.length && i < 8; i++) ++ funcs |= (((u64)obj->buffer.pointer[i]) << (i * 8)); ++ ++ ACPI_FREE(obj); ++ ++ // D0 exit/entry notification ++ if (funcs & BIT(SSAM_SSH_DSM_NOTIF_D0)) { ++ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID, ++ SSAM_SSH_DSM_REVISION, SSAM_SSH_DSM_NOTIF_D0, ++ NULL, ACPI_TYPE_INTEGER); ++ if (!obj) ++ return -EFAULT; ++ ++ caps->notif_d0exit = !!obj->integer.value; ++ ACPI_FREE(obj); ++ } ++ ++ return 0; ++} ++ ++static int ssam_controller_init(struct ssam_controller *ctrl, ++ struct serdev_device *serdev) ++{ ++ acpi_handle handle = ACPI_HANDLE(&serdev->dev); ++ int status; ++ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_UNINITIALIZED) { ++ dev_err(&serdev->dev, "embedded controller already 
initialized\n"); ++ return -EBUSY; ++ } ++ ++ status = ssam_device_caps_load_from_acpi(handle, &ctrl->caps); ++ if (status) ++ return status; ++ ++ dev_dbg(&serdev->dev, "device capabilities:\n"); ++ dev_dbg(&serdev->dev, " notif_display: %u\n", ctrl->caps.notif_display); ++ dev_dbg(&serdev->dev, " notif_d0exit: %u\n", ctrl->caps.notif_d0exit); ++ ++ ssh_seq_reset(&ctrl->counter.seq); ++ ssh_rqid_reset(&ctrl->counter.rqid); ++ ++ // initialize event/request completion system ++ status = ssam_cplt_init(&ctrl->cplt, &serdev->dev); ++ if (status) ++ return status; ++ ++ // initialize request and packet transmission layers ++ status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops); ++ if (status) { ++ ssam_cplt_destroy(&ctrl->cplt); ++ return status; ++ } ++ ++ // update state ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_INITIALIZED); ++ return 0; ++} ++ ++static int ssam_controller_start(struct ssam_controller *ctrl) ++{ ++ int status; ++ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_INITIALIZED) ++ return -EINVAL; ++ ++ status = ssh_rtl_tx_start(&ctrl->rtl); ++ if (status) ++ return status; ++ ++ status = ssh_rtl_rx_start(&ctrl->rtl); ++ if (status) { ++ ssh_rtl_tx_flush(&ctrl->rtl); ++ return status; ++ } ++ ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED); ++ return 0; ++} ++ ++static void ssam_controller_shutdown(struct ssam_controller *ctrl) ++{ ++ enum ssam_controller_state s = smp_load_acquire(&ctrl->state); ++ int status; ++ ++ if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED) ++ return; ++ ++ // try to flush pending events and requests while everything still works ++ status = ssh_rtl_flush(&ctrl->rtl, msecs_to_jiffies(5000)); ++ if (status) { ++ ssam_err(ctrl, "failed to flush request transmission layer: %d\n", ++ status); ++ } ++ ++ // try to flush out all currently completing requests and events ++ ssam_cplt_flush(&ctrl->cplt); ++ ++ /* ++ * We expect all notifiers to have been removed by the respective client ++ * driver that set them up at this point. If this warning occurs, some ++ * client driver has not done that... ++ */ ++ WARN_ON(!ssam_notifier_empty(ctrl)); ++ ++ /* ++ * Nevertheless, we should still take care of drivers that don't behave ++ * well. Thus disable all enabled events, unregister all notifiers. ++ */ ++ ssam_notifier_unregister_all(ctrl); ++ ++ // cancel rem. requests, ensure no new ones can be queued, stop threads ++ ssh_rtl_tx_flush(&ctrl->rtl); ++ ssh_rtl_shutdown(&ctrl->rtl); ++ ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_STOPPED); ++} ++ ++static void ssam_controller_destroy(struct ssam_controller *ctrl) ++{ ++ if (smp_load_acquire(&ctrl->state) == SSAM_CONTROLLER_UNINITIALIZED) ++ return; ++ ++ /* ++ * Note: New events could still have been received after the previous ++ * flush in ssam_controller_shutdown, before the request transport layer ++ * has been shut down. At this point, after the shutdown, we can be sure ++ * that no new events will be queued. The call to ssam_cplt_destroy will ++ * ensure that those remaining are being completed and freed. 
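++ *
++ * Teardown order (mirroring the calls below):
++ *
++ *	ssam_cplt_destroy()	drains the completion workqueue, freeing
++ *				any event items still queued
++ *	ssh_rtl_destroy()	tears down the request/packet layers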
++ */ ++ ++ // actually free resources ++ ssam_cplt_destroy(&ctrl->cplt); ++ ssh_rtl_destroy(&ctrl->rtl); ++ ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_UNINITIALIZED); ++} ++ ++static int ssam_controller_suspend(struct ssam_controller *ctrl) ++{ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) ++ return -EINVAL; ++ ++ ssam_dbg(ctrl, "pm: suspending controller\n"); ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_SUSPENDED); ++ return 0; ++} ++ ++static int ssam_controller_resume(struct ssam_controller *ctrl) ++{ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_SUSPENDED) ++ return -EINVAL; ++ ++ ssam_dbg(ctrl, "pm: resuming controller\n"); ++ smp_store_release(&ctrl->state, SSAM_CONTROLLER_STARTED); ++ return 0; ++} ++ ++ ++static inline ++int ssam_controller_receive_buf(struct ssam_controller *ctrl, ++ const unsigned char *buf, size_t n) ++{ ++ return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n); ++} ++ ++static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl) ++{ ++ ssh_ptl_tx_wakeup(&ctrl->rtl.ptl, true); ++} ++ ++ ++/* -- Top-level request interface ------------------------------------------- */ ++ ++ssize_t ssam_request_write_data(struct ssam_span *buf, ++ struct ssam_controller *ctrl, ++ struct ssam_request *spec) ++{ ++ struct msgbuf msgb; ++ u16 rqid; ++ u8 seq; ++ ++ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) ++ return -EINVAL; ++ ++ msgb_init(&msgb, buf->ptr, buf->len); ++ seq = ssh_seq_next(&ctrl->counter.seq); ++ rqid = ssh_rqid_next(&ctrl->counter.rqid); ++ msgb_push_cmd(&msgb, seq, rqid, spec); ++ ++ return msgb_bytes_used(&msgb); ++} ++EXPORT_SYMBOL_GPL(ssam_request_write_data); ++ ++ ++static void ssam_request_sync_complete(struct ssh_request *rqst, ++ const struct ssh_command *cmd, ++ const struct ssam_span *data, int status) ++{ ++ struct ssh_rtl *rtl = ssh_request_rtl(rqst); ++ struct ssam_request_sync *r; ++ ++ r = container_of(rqst, struct ssam_request_sync, base); ++ r->status = status; ++ ++ if (r->resp) ++ r->resp->length = 0; ++ ++ if (status) { ++ rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status); ++ return; ++ } ++ ++ if (!data) // handle requests without a response ++ return; ++ ++ if (!r->resp || !r->resp->pointer) { ++ if (data->len) { ++ rtl_warn(rtl, "rsp: no response buffer provided, " ++ "dropping data\n"); ++ } ++ return; ++ } ++ ++ if (data->len > r->resp->capacity) { ++ rtl_err(rtl, "rsp: response buffer too small, " ++ "capacity: %zu bytes, got: %zu bytes\n", ++ r->resp->capacity, data->len); ++ r->status = -ENOSPC; ++ return; ++ } ++ ++ r->resp->length = data->len; ++ memcpy(r->resp->pointer, data->ptr, data->len); ++} ++ ++static void ssam_request_sync_release(struct ssh_request *rqst) ++{ ++ complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp); ++} ++ ++static const struct ssh_request_ops ssam_request_sync_ops = { ++ .release = ssam_request_sync_release, ++ .complete = ssam_request_sync_complete, ++}; ++ ++ ++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags, ++ struct ssam_request_sync **rqst, ++ struct ssam_span *buffer) ++{ ++ size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len); ++ ++ *rqst = kzalloc(sizeof(struct ssam_request_sync) + msglen, flags); ++ if (!*rqst) ++ return -ENOMEM; ++ ++ buffer->ptr = (u8 *)(*rqst + 1); ++ buffer->len = msglen; ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(ssam_request_sync_alloc); ++ ++void ssam_request_sync_init(struct ssam_request_sync *rqst, ++ enum ssam_request_flags flags) ++{ ++ ssh_request_init(&rqst->base, 
flags, &ssam_request_sync_ops); ++ init_completion(&rqst->comp); ++ rqst->resp = NULL; ++ rqst->status = 0; ++} ++EXPORT_SYMBOL_GPL(ssam_request_sync_init); ++ ++int ssam_request_sync_submit(struct ssam_controller *ctrl, ++ struct ssam_request_sync *rqst) ++{ ++ enum ssam_controller_state state = smp_load_acquire(&ctrl->state); ++ int status; ++ ++ if (state == SSAM_CONTROLLER_SUSPENDED) { ++ ssam_warn(ctrl, "rqst: embedded controller is suspended\n"); ++ ssh_request_put(&rqst->base); ++ return -EPERM; ++ } ++ ++ if (state != SSAM_CONTROLLER_STARTED) { ++ ssam_warn(ctrl, "rqst: embedded controller is uninitialized\n"); ++ ssh_request_put(&rqst->base); ++ return -ENXIO; ++ } ++ ++ status = ssh_rtl_submit(&ctrl->rtl, &rqst->base); ++ ssh_request_put(&rqst->base); ++ ++ return status; ++} ++EXPORT_SYMBOL_GPL(ssam_request_sync_submit); ++ ++int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec, ++ struct ssam_response *rsp) ++{ ++ struct ssam_request_sync *rqst; ++ struct ssam_span buf; ++ size_t len; ++ int status; ++ ++ // prevent overflow, allows us to skip checks later on ++ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) { ++ ssam_err(ctrl, "rqst: request payload too large\n"); ++ return -EINVAL; ++ } ++ ++ status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf); ++ if (status) ++ return status; ++ ++ ssam_request_sync_init(rqst, spec->flags); ++ ssam_request_sync_set_resp(rqst, rsp); ++ ++ len = ssam_request_write_data(&buf, ctrl, spec); ++ ssam_request_sync_set_data(rqst, buf.ptr, len); ++ ++ status = ssam_request_sync_submit(ctrl, rqst); ++ if (!status) ++ status = ssam_request_sync_wait(rqst); ++ ++ kfree(rqst); ++ return status; ++} ++EXPORT_SYMBOL_GPL(ssam_request_sync); ++ ++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl, ++ struct ssam_request *spec, ++ struct ssam_response *rsp, ++ struct ssam_span *buf) ++{ ++ struct ssam_request_sync rqst; ++ size_t len; ++ int status; ++ ++ // prevent overflow, allows us to skip checks later on ++ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) { ++ ssam_err(ctrl, "rqst: request payload too large\n"); ++ return -EINVAL; ++ } ++ ++ ssam_request_sync_init(&rqst, spec->flags); ++ ssam_request_sync_set_resp(&rqst, rsp); ++ ++ len = ssam_request_write_data(buf, ctrl, spec); ++ ssam_request_sync_set_data(&rqst, buf->ptr, len); ++ ++ status = ssam_request_sync_submit(ctrl, &rqst); ++ if (!status) ++ status = ssam_request_sync_wait(&rqst); ++ ++ return status; ++} ++EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer); ++ ++ ++/* -- Internal SAM requests. 
------------------------------------------------ */ ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, { ++ .target_category = SSAM_SSH_TC_SAM, ++ .command_id = 0x13, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, { ++ .target_category = SSAM_SSH_TC_SAM, ++ .command_id = 0x15, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, { ++ .target_category = SSAM_SSH_TC_SAM, ++ .command_id = 0x16, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, { ++ .target_category = SSAM_SSH_TC_SAM, ++ .command_id = 0x33, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, { ++ .target_category = SSAM_SSH_TC_SAM, ++ .command_id = 0x34, ++ .instance_id = 0x00, ++ .channel = 0x01, ++}); ++ ++static int ssam_ssh_event_enable(struct ssam_controller *ctrl, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id, u8 flags) ++{ ++ struct ssh_notification_params params; ++ struct ssam_request rqst; ++ struct ssam_response result; ++ int status; ++ ++ u16 rqid = ssh_tc_to_rqid(id.target_category); ++ u8 buf[1] = { 0x00 }; ++ ++ // only allow RQIDs that lie within event spectrum ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ params.target_category = id.target_category; ++ params.instance_id = id.instance; ++ params.flags = flags; ++ put_unaligned_le16(rqid, ¶ms.request_id); ++ ++ rqst.target_category = reg.target_category; ++ rqst.command_id = reg.cid_enable; ++ rqst.instance_id = 0x00; ++ rqst.channel = reg.channel; ++ rqst.flags = SSAM_REQUEST_HAS_RESPONSE; ++ rqst.length = sizeof(params); ++ rqst.payload = (u8 *)¶ms; ++ ++ result.capacity = ARRAY_SIZE(buf); ++ result.length = 0; ++ result.pointer = buf; ++ ++ status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params)); ++ if (status) { ++ ssam_err(ctrl, "failed to enable event source " ++ "(tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n", ++ id.target_category, id.instance, reg.target_category); ++ } ++ ++ if (buf[0] != 0x00) { ++ ssam_warn(ctrl, "unexpected result while enabling event source: " ++ "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n", ++ buf[0], id.target_category, id.instance, ++ reg.target_category); ++ } ++ ++ return status; ++ ++} ++ ++static int ssam_ssh_event_disable(struct ssam_controller *ctrl, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id, u8 flags) ++{ ++ struct ssh_notification_params params; ++ struct ssam_request rqst; ++ struct ssam_response result; ++ int status; ++ ++ u16 rqid = ssh_tc_to_rqid(id.target_category); ++ u8 buf[1] = { 0x00 }; ++ ++ // only allow RQIDs that lie within event spectrum ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ params.target_category = id.target_category; ++ params.instance_id = id.instance; ++ params.flags = flags; ++ put_unaligned_le16(rqid, ¶ms.request_id); ++ ++ rqst.target_category = reg.target_category; ++ rqst.command_id = reg.cid_disable; ++ rqst.instance_id = 0x00; ++ rqst.channel = reg.channel; ++ rqst.flags = SSAM_REQUEST_HAS_RESPONSE; ++ rqst.length = sizeof(params); ++ rqst.payload = (u8 *)¶ms; ++ ++ result.capacity = ARRAY_SIZE(buf); ++ result.length = 0; ++ result.pointer = buf; ++ ++ status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params)); ++ if (status) { ++ ssam_err(ctrl, "failed to disable event source " ++ "(tc: 0x%02x, iid: 
0x%02x, reg: 0x%02x)\n", ++ id.target_category, id.instance, reg.target_category); ++ } ++ ++ if (buf[0] != 0x00) { ++ ssam_warn(ctrl, "unexpected result while disabling event source: " ++ "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n", ++ buf[0], id.target_category, id.instance, ++ reg.target_category); ++ } ++ ++ return status; ++} ++ ++ ++/* -- Wrappers for internal SAM requests. ----------------------------------- */ ++ ++static int ssam_log_firmware_version(struct ssam_controller *ctrl) ++{ ++ __le32 __version; ++ u32 version, a, b, c; ++ int status; ++ ++ status = ssam_ssh_get_firmware_version(ctrl, &__version); ++ if (status) ++ return status; ++ ++ version = le32_to_cpu(__version); ++ a = (version >> 24) & 0xff; ++ b = ((version >> 8) & 0xffff); ++ c = version & 0xff; ++ ++ ssam_info(ctrl, "SAM controller version: %u.%u.%u\n", a, b, c); ++ return 0; ++} ++ ++static int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl) ++{ ++ int status; ++ u8 response; ++ ++ if (!ctrl->caps.notif_display) ++ return 0; ++ ++ ssam_dbg(ctrl, "pm: notifying display off\n"); ++ ++ status = ssam_ssh_notif_display_off(ctrl, &response); ++ if (status) ++ return status; ++ ++ if (response != 0) { ++ ssam_err(ctrl, "unexpected response from display-off notification: " ++ "0x%02x\n", response); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl) ++{ ++ int status; ++ u8 response; ++ ++ if (!ctrl->caps.notif_display) ++ return 0; ++ ++ ssam_dbg(ctrl, "pm: notifying display on\n"); ++ ++ status = ssam_ssh_notif_display_on(ctrl, &response); ++ if (status) ++ return status; ++ ++ if (response != 0) { ++ ssam_err(ctrl, "unexpected response from display-on notification: " ++ "0x%02x\n", response); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl) ++{ ++ int status; ++ u8 response; ++ ++ if (!ctrl->caps.notif_d0exit) ++ return 0; ++ ++ ssam_dbg(ctrl, "pm: notifying D0 exit\n"); ++ ++ status = ssam_ssh_notif_d0_exit(ctrl, &response); ++ if (status) ++ return status; ++ ++ if (response != 0) { ++ ssam_err(ctrl, "unexpected response from D0-exit notification: " ++ "0x%02x\n", response); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl) ++{ ++ int status; ++ u8 response; ++ ++ if (!ctrl->caps.notif_d0exit) ++ return 0; ++ ++ ssam_dbg(ctrl, "pm: notifying D0 entry\n"); ++ ++ status = ssam_ssh_notif_d0_entry(ctrl, &response); ++ if (status) ++ return status; ++ ++ if (response != 0) { ++ ssam_err(ctrl, "unexpected response from D0-entry notification: " ++ "0x%02x\n", response); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++ ++/* -- Top-level event registry interface. 
----------------------------------- */ ++ ++int ssam_notifier_register(struct ssam_controller *ctrl, ++ struct ssam_event_notifier *n) ++{ ++ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); ++ struct ssam_nf_head *nf_head; ++ struct ssam_nf *nf; ++ int rc, status; ++ ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ nf = &ctrl->cplt.event.notif; ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ ++ mutex_lock(&nf->lock); ++ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) { ++ mutex_unlock(&nf->lock); ++ return -ENXIO; ++ } ++ ++ rc = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id); ++ if (rc < 0) { ++ mutex_unlock(&nf->lock); ++ return rc; ++ } ++ ++ ssam_dbg(ctrl, "enabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, " ++ "rc: %d)\n", n->event.reg.target_category, ++ n->event.id.target_category, n->event.id.instance, rc); ++ ++ status = __ssam_nfblk_insert(nf_head, &n->base); ++ if (status) { ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ mutex_unlock(&nf->lock); ++ return status; ++ } ++ ++ if (rc == 1) { ++ status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id, ++ n->event.flags); ++ if (status) { ++ __ssam_nfblk_remove(nf_head, &n->base); ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ mutex_unlock(&nf->lock); ++ synchronize_srcu(&nf_head->srcu); ++ return status; ++ } ++ } ++ ++ mutex_unlock(&nf->lock); ++ return 0; ++ ++} ++EXPORT_SYMBOL_GPL(ssam_notifier_register); ++ ++int ssam_notifier_unregister(struct ssam_controller *ctrl, ++ struct ssam_event_notifier *n) ++{ ++ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); ++ struct ssam_nf_head *nf_head; ++ struct ssam_nf *nf; ++ int rc, status = 0; ++ ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ nf = &ctrl->cplt.event.notif; ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ ++ mutex_lock(&nf->lock); ++ ++ if (smp_load_acquire(&ctrl->state) != SSAM_CONTROLLER_STARTED) { ++ mutex_unlock(&nf->lock); ++ return -ENXIO; ++ } ++ ++ rc = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ if (rc < 0) { ++ mutex_unlock(&nf->lock); ++ return rc; ++ } ++ ++ ssam_dbg(ctrl, "disabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x, " ++ "rc: %d)\n", n->event.reg.target_category, ++ n->event.id.target_category, n->event.id.instance, rc); ++ ++ if (rc == 0) { ++ status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id, ++ n->event.flags); ++ } ++ ++ __ssam_nfblk_remove(nf_head, &n->base); ++ mutex_unlock(&nf->lock); ++ synchronize_srcu(&nf_head->srcu); ++ ++ return status; ++} ++EXPORT_SYMBOL_GPL(ssam_notifier_unregister); ++ ++static bool ssam_notifier_empty(struct ssam_controller *ctrl) ++{ ++ struct ssam_nf *nf = &ctrl->cplt.event.notif; ++ bool result; ++ ++ mutex_lock(&nf->lock); ++ result = ssam_nf_refcount_empty(nf); ++ mutex_unlock(&nf->lock); ++ ++ return result; ++} ++ ++static void ssam_notifier_unregister_all(struct ssam_controller *ctrl) ++{ ++ struct ssam_nf *nf = &ctrl->cplt.event.notif; ++ struct ssam_nf_refcount_entry *pos, *n; ++ ++ mutex_lock(&nf->lock); ++ rbtree_postorder_for_each_entry_safe(pos, n, &nf->refcount, node) { ++ // ignore errors, will get logged in call ++ ssam_ssh_event_disable(ctrl, pos->key.reg, pos->key.id, 0); ++ kfree(pos); ++ } ++ nf->refcount = RB_ROOT; ++ mutex_unlock(&nf->lock); ++} ++ ++ ++/* -- Wakeup IRQ. 
----------------------------------------------------------- */
++
++static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
++static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
++
++static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
++ { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
++ { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
++ { },
++};
++
++static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
++{
++ struct ssam_controller *ctrl = dev_id;
++
++ ssam_dbg(ctrl, "pm: wake irq triggered\n");
++
++ // Note: Proper wakeup detection is currently unimplemented.
++ // When the EC is in display-off or any other non-D0 state, it
++ // does not send events/notifications to the host. Instead it
++ // signals that there are events available via the wakeup IRQ.
++ // This driver is responsible for calling back to the EC to
++ // release these events one-by-one.
++ //
++ // This IRQ should not cause a full system resume on its own.
++ // Instead, events should be handled by their respective subsystem
++ // drivers, which in turn should signal whether a full system
++ // resume should be performed.
++ //
++ // TODO: Send GPIO callback command repeatedly to EC until callback
++ // returns 0x00. Return flag of callback is "has more events".
++ // Each time the command is sent, one event is "released". Once
++ // all events have been released (return = 0x00), the GPIO is
++ // re-armed. Detect wakeup events during this process, go back to
++ // sleep if no wakeup event has been received.
++
++ return IRQ_HANDLED;
++}
++
++static int ssam_irq_setup(struct ssam_controller *ctrl)
++{
++ struct device *dev = ssam_controller_device(ctrl);
++ struct gpio_desc *gpiod;
++ int irq;
++ int status;
++
++ /*
++ * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
++ * However, the GPIO line only gets reset by sending the GPIO callback
++ * command to SAM (or alternatively the display-on notification). As
++ * proper handling for this interrupt is not implemented yet, leaving
++ * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
++ * never gets sent and thus the line never gets reset). To avoid
++ * this, mark the IRQ as TRIGGER_RISING for now, only creating a single
++ * interrupt, and let the SAM resume callback during the controller
++ * resume process clear it.
++ */
++ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
++
++ gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
++ if (IS_ERR(gpiod))
++ return PTR_ERR(gpiod);
++
++ irq = gpiod_to_irq(gpiod);
++ gpiod_put(gpiod);
++
++ if (irq < 0)
++ return irq;
++
++ status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
++ "surface_sam_wakeup", ctrl);
++ if (status)
++ return status;
++
++ ctrl->irq.num = irq;
++ return 0;
++}
++
++static void ssam_irq_free(struct ssam_controller *ctrl)
++{
++ free_irq(ctrl->irq.num, ctrl);
++ ctrl->irq.num = -1;
++}
++
++
++/* -- Glue layer (serdev_device -> ssam_controller).
------------------------ */ ++ ++static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf, ++ size_t n) ++{ ++ struct ssam_controller *ctrl = serdev_device_get_drvdata(dev); ++ return ssam_controller_receive_buf(ctrl, buf, n); ++} ++ ++static void ssam_write_wakeup(struct serdev_device *dev) ++{ ++ struct ssam_controller *ctrl = serdev_device_get_drvdata(dev); ++ ssam_controller_write_wakeup(ctrl); ++} ++ ++static const struct serdev_device_ops ssam_serdev_ops = { ++ .receive_buf = ssam_receive_buf, ++ .write_wakeup = ssam_write_wakeup, ++}; ++ ++ ++/* -- ACPI based device setup. ---------------------------------------------- */ ++ ++static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc, ++ void *ctx) ++{ ++ struct serdev_device *serdev = ctx; ++ struct acpi_resource_common_serialbus *serial; ++ struct acpi_resource_uart_serialbus *uart; ++ bool flow_control; ++ int status = 0; ++ ++ if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) ++ return AE_OK; ++ ++ serial = &rsc->data.common_serial_bus; ++ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART) ++ return AE_OK; ++ ++ uart = &rsc->data.uart_serial_bus; ++ ++ // set up serdev device ++ serdev_device_set_baudrate(serdev, uart->default_baud_rate); ++ ++ // serdev currently only supports RTSCTS flow control ++ if (uart->flow_control & (~((u8) ACPI_UART_FLOW_CONTROL_HW))) { ++ dev_warn(&serdev->dev, "setup: unsupported flow control" ++ " (value: 0x%02x)\n", uart->flow_control); ++ } ++ ++ // set RTSCTS flow control ++ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW; ++ serdev_device_set_flow_control(serdev, flow_control); ++ ++ // serdev currently only supports EVEN/ODD parity ++ switch (uart->parity) { ++ case ACPI_UART_PARITY_NONE: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); ++ break; ++ case ACPI_UART_PARITY_EVEN: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN); ++ break; ++ case ACPI_UART_PARITY_ODD: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD); ++ break; ++ default: ++ dev_warn(&serdev->dev, "setup: unsupported parity" ++ " (value: 0x%02x)\n", uart->parity); ++ break; ++ } ++ ++ if (status) { ++ dev_err(&serdev->dev, "setup: failed to set parity" ++ " (value: 0x%02x)\n", uart->parity); ++ return status; ++ } ++ ++ return AE_CTRL_TERMINATE; // we've found the resource and are done ++} ++ ++static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle, ++ struct serdev_device *serdev) ++{ ++ return acpi_walk_resources(handle, METHOD_NAME__CRS, ++ ssam_serdev_setup_via_acpi_crs, serdev); ++} ++ ++ ++/* -- Power management. ----------------------------------------------------- */ ++ ++static void surface_sam_ssh_shutdown(struct device *dev) ++{ ++ struct ssam_controller *c = dev_get_drvdata(dev); ++ int status; ++ ++ /* ++ * Try to signal display-off and D0-exit, ignore any errors. ++ * ++ * Note: It has not been established yet if this is actually ++ * necessary/useful for shutdown. ++ */ ++ ++ status = ssam_ctrl_notif_display_off(c); ++ if (status) ++ ssam_err(c, "pm: display-off notification failed: %d\n", status); ++ ++ status = ssam_ctrl_notif_d0_exit(c); ++ if (status) ++ ssam_err(c, "pm: D0-exit notification failed: %d\n", status); ++} ++ ++static int surface_sam_ssh_suspend(struct device *dev) ++{ ++ struct ssam_controller *c = dev_get_drvdata(dev); ++ int status; ++ ++ /* ++ * Try to signal display-off and D0-exit, enable IRQ wakeup if ++ * specified. Abort on error. 
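++ * On error, the notifications that were already sent are rolled back
++ * via the err_irq/err_notif labels below (D0-entry after a failed
++ * wake-IRQ setup, display-on after a failed D0-exit).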
++ *
++ * Note: Signalling display-off/display-on should normally be done from
++ * some sort of display state notifier. As that is not available, signal
++ * it here.
++ */
++
++ status = ssam_ctrl_notif_display_off(c);
++ if (status) {
++ ssam_err(c, "pm: display-off notification failed: %d\n", status);
++ return status;
++ }
++
++ status = ssam_ctrl_notif_d0_exit(c);
++ if (status) {
++ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
++ goto err_notif;
++ }
++
++ if (device_may_wakeup(dev)) {
++ status = enable_irq_wake(c->irq.num);
++ if (status) {
++ ssam_err(c, "failed to enable wake IRQ: %d\n", status);
++ goto err_irq;
++ }
++
++ c->irq.wakeup_enabled = true;
++ } else {
++ c->irq.wakeup_enabled = false;
++ }
++
++ WARN_ON(ssam_controller_suspend(c));
++ return 0;
++
++err_irq:
++ ssam_ctrl_notif_d0_entry(c);
++err_notif:
++ ssam_ctrl_notif_display_on(c);
++ return status;
++}
++
++static int surface_sam_ssh_resume(struct device *dev)
++{
++ struct ssam_controller *c = dev_get_drvdata(dev);
++ int status;
++
++ WARN_ON(ssam_controller_resume(c));
++
++ /*
++ * Try to disable IRQ wakeup (if specified), signal display-on and
++ * D0-entry. In case of errors, log them and try to restore normal
++ * operation state as far as possible.
++ *
++ * Note: Signalling display-off/display-on should normally be done from
++ * some sort of display state notifier. As that is not available, signal
++ * it here.
++ */
++
++ if (c->irq.wakeup_enabled) {
++ status = disable_irq_wake(c->irq.num);
++ if (status)
++ ssam_err(c, "failed to disable wake IRQ: %d\n", status);
++
++ c->irq.wakeup_enabled = false;
++ }
++
++ status = ssam_ctrl_notif_d0_entry(c);
++ if (status)
++ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
++
++ status = ssam_ctrl_notif_display_on(c);
++ if (status)
++ ssam_err(c, "pm: display-on notification failed: %d\n", status);
++
++ return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(surface_sam_ssh_pm_ops, surface_sam_ssh_suspend,
++ surface_sam_ssh_resume);
++
++
++/* -- Device/driver setup. -------------------------------------------------- */
++
++static struct ssam_controller ssam_controller = {
++ .state = SSAM_CONTROLLER_UNINITIALIZED,
++};
++static DEFINE_MUTEX(ssam_controller_lock);
++
++static int __ssam_client_link(struct ssam_controller *c, struct device *client)
++{
++ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
++ struct device_link *link;
++ struct device *ctrldev;
++
++ if (smp_load_acquire(&c->state) != SSAM_CONTROLLER_STARTED)
++ return -ENXIO;
++
++ if ((ctrldev = ssam_controller_device(c)) == NULL)
++ return -ENXIO;
++
++ if ((link = device_link_add(client, ctrldev, flags)) == NULL)
++ return -ENOMEM;
++
++ /*
++ * Return -ENXIO if supplier driver is on its way to be removed. In this
++ * case, the controller won't be around for much longer and the device
++ * link is not going to save us any more, as unbinding is already in
++ * progress.
++ */
++ if (link->status == DL_STATE_SUPPLIER_UNBIND)
++ return -ENXIO;
++
++ return 0;
++}
++
++int ssam_client_bind(struct device *client, struct ssam_controller **ctrl)
++{
++ struct ssam_controller *c = &ssam_controller;
++ int status;
++
++ mutex_lock(&ssam_controller_lock);
++ status = __ssam_client_link(c, client);
++ mutex_unlock(&ssam_controller_lock);
++
++ *ctrl = status == 0 ?
c : NULL; ++ return status; ++} ++EXPORT_SYMBOL_GPL(ssam_client_bind); ++ ++ ++static int surface_sam_ssh_probe(struct serdev_device *serdev) ++{ ++ struct ssam_controller *ctrl = &ssam_controller; ++ acpi_handle *ssh = ACPI_HANDLE(&serdev->dev); ++ int status; ++ ++ if (gpiod_count(&serdev->dev, NULL) < 0) ++ return -ENODEV; ++ ++ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios); ++ if (status) ++ return status; ++ ++ // set up EC ++ mutex_lock(&ssam_controller_lock); ++ ++ // initialize controller ++ status = ssam_controller_init(ctrl, serdev); ++ if (status) ++ goto err_ctrl_init; ++ ++ // set up serdev device ++ serdev_device_set_drvdata(serdev, ctrl); ++ serdev_device_set_client_ops(serdev, &ssam_serdev_ops); ++ status = serdev_device_open(serdev); ++ if (status) ++ goto err_devopen; ++ ++ status = ssam_serdev_setup_via_acpi(ssh, serdev); ++ if (ACPI_FAILURE(status)) ++ goto err_devinit; ++ ++ // start controller ++ status = ssam_controller_start(ctrl); ++ if (status) ++ goto err_devinit; ++ ++ // initial SAM requests: log version, notify default/init power states ++ status = ssam_log_firmware_version(ctrl); ++ if (status) ++ goto err_initrq; ++ ++ status = ssam_ctrl_notif_d0_entry(ctrl); ++ if (status) ++ goto err_initrq; ++ ++ status = ssam_ctrl_notif_display_on(ctrl); ++ if (status) ++ goto err_initrq; ++ ++ // setup IRQ ++ status = ssam_irq_setup(ctrl); ++ if (status) ++ goto err_initrq; ++ ++ mutex_unlock(&ssam_controller_lock); ++ ++ /* ++ * TODO: The EC can wake up the system via the associated GPIO interrupt ++ * in multiple situations. One of which is the remaining battery ++ * capacity falling below a certain threshold. Normally, we should ++ * use the device_init_wakeup function, however, the EC also seems ++ * to have other reasons for waking up the system and it seems ++ * that Windows has additional checks whether the system should be ++ * resumed. In short, this causes some spurious unwanted wake-ups. ++ * For now let's thus default power/wakeup to false. 
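++ *
++ * (Sketch: device_init_wakeup(&serdev->dev, true) would be the
++ * one-call alternative, marking the device wakeup-capable and
++ * enabling wakeup in one go, i.e. device_set_wakeup_capable()
++ * plus device_wakeup_enable(). Only the former is done below.)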
++ */ ++ device_set_wakeup_capable(&serdev->dev, true); ++ acpi_walk_dep_device_list(ssh); ++ ++ return 0; ++ ++err_initrq: ++ ssam_controller_shutdown(ctrl); ++err_devinit: ++ serdev_device_close(serdev); ++err_devopen: ++ ssam_controller_destroy(ctrl); ++err_ctrl_init: ++ serdev_device_set_drvdata(serdev, NULL); ++ mutex_unlock(&ssam_controller_lock); ++ return status; ++} ++ ++static void surface_sam_ssh_remove(struct serdev_device *serdev) ++{ ++ struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev); ++ int status; ++ ++ mutex_lock(&ssam_controller_lock); ++ ssam_irq_free(ctrl); ++ ++ // suspend EC and disable events ++ status = ssam_ctrl_notif_display_off(ctrl); ++ if (status) { ++ dev_err(&serdev->dev, "display-off notification failed: %d\n", ++ status); ++ } ++ ++ status = ssam_ctrl_notif_d0_exit(ctrl); ++ if (status) { ++ dev_err(&serdev->dev, "D0-exit notification failed: %d\n", ++ status); ++ } ++ ++ ssam_controller_shutdown(ctrl); ++ ++ // shut down actual transport ++ serdev_device_wait_until_sent(serdev, 0); ++ serdev_device_close(serdev); ++ ++ ssam_controller_destroy(ctrl); ++ ++ device_set_wakeup_capable(&serdev->dev, false); ++ serdev_device_set_drvdata(serdev, NULL); ++ mutex_unlock(&ssam_controller_lock); ++} ++ ++ ++static const struct acpi_device_id surface_sam_ssh_match[] = { ++ { "MSHW0084", 0 }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, surface_sam_ssh_match); ++ ++static struct serdev_device_driver surface_sam_ssh = { ++ .probe = surface_sam_ssh_probe, ++ .remove = surface_sam_ssh_remove, ++ .driver = { ++ .name = "surface_sam_ssh", ++ .acpi_match_table = surface_sam_ssh_match, ++ .pm = &surface_sam_ssh_pm_ops, ++ .shutdown = surface_sam_ssh_shutdown, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++ ++ ++/* -- Module setup. --------------------------------------------------------- */ ++ ++static int __init surface_sam_ssh_init(void) ++{ ++ int status; ++ ++ status = ssh_ctrl_packet_cache_init(); ++ if (status) ++ goto err_cpkg; ++ ++ status = ssam_event_item_cache_init(); ++ if (status) ++ goto err_evitem; ++ ++ status = serdev_device_driver_register(&surface_sam_ssh); ++ if (status) ++ goto err_register; ++ ++ return 0; ++ ++err_register: ++ ssam_event_item_cache_destroy(); ++err_evitem: ++ ssh_ctrl_packet_cache_destroy(); ++err_cpkg: ++ return status; ++} ++ ++static void __exit surface_sam_ssh_exit(void) ++{ ++ serdev_device_driver_unregister(&surface_sam_ssh); ++ ssam_event_item_cache_destroy(); ++ ssh_ctrl_packet_cache_destroy(); ++} ++ ++/* ++ * Ensure that the driver is loaded late due to some issues with the UART ++ * communication. Specifically, we want to ensure that DMA is ready and being ++ * used. Not using DMA can result in spurious communication failures, ++ * especially during boot, which among other things will result in wrong ++ * battery information (via ACPI _BIX) being displayed. Using a late init_call ++ * instead of the normal module_init gives the DMA subsystem time to ++ * initialize and via that results in a more stable communication, avoiding ++ * such failures. 
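++ *
++ * (late_initcall() registers the function in the last initcall level,
++ * after all device-level initcalls, which is what gives built-in DMA
++ * drivers the chance to initialize first.)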
++ */
++late_initcall(surface_sam_ssh_init);
++module_exit(surface_sam_ssh_exit);
++
++MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
++MODULE_DESCRIPTION("Surface Serial Hub Driver for 5th Generation Surface Devices");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.h b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
+new file mode 100644
+index 0000000000000..ba57adb2a3c9d
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.h
+@@ -0,0 +1,717 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Interface for Surface Serial Hub (SSH).
++ *
++ * The SSH is the main communication hub for communication between host and
++ * the Surface/System Aggregator Module (SAM) on newer Microsoft Surface
++ * devices (Book 2, Pro 5, Laptops, ...). Also referred to as SAM-over-SSH.
++ * Older devices (Book 1, Pro 4) use SAM-over-HID (via I2C).
++ */
++
++#ifndef _SURFACE_SAM_SSH_H
++#define _SURFACE_SAM_SSH_H
++
++#include <linux/types.h>
++#include <linux/device.h>
++
++
++/* -- Data structures for SAM-over-SSH communication. ----------------------- */
++
++/**
++ * enum ssh_frame_type - Frame types for SSH frames.
++ * @SSH_FRAME_TYPE_DATA_SEQ: Indicates a data frame, followed by a payload with
++ * the length specified in the ssh_frame.len field. This
++ * frame is sequenced, meaning that an ACK is required.
++ * @SSH_FRAME_TYPE_DATA_NSQ: Same as SSH_FRAME_TYPE_DATA_SEQ, but unsequenced,
++ * meaning that the message does not have to be ACKed.
++ * @SSH_FRAME_TYPE_ACK: Indicates an ACK message.
++ * @SSH_FRAME_TYPE_NAK: Indicates an error response for a previously sent
++ * frame. In general, this means that the frame and/or
++ * payload is malformed, e.g. a CRC is wrong. For command-
++ * type payloads, this can also mean that the command is
++ * invalid.
++ */
++enum ssh_frame_type {
++ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
++ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
++ SSH_FRAME_TYPE_ACK = 0x40,
++ SSH_FRAME_TYPE_NAK = 0x04,
++};
++
++/**
++ * struct ssh_frame - SSH communication frame.
++ * @type: The type of the frame. See &enum ssh_frame_type.
++ * @len: The length of the frame payload directly following the CRC for this
++ * frame. Does not include the final CRC for that payload.
++ * @seq: The sequence number for this message/exchange.
++ */
++struct ssh_frame {
++ u8 type;
++ __le16 len;
++ u8 seq;
++} __packed;
++
++static_assert(sizeof(struct ssh_frame) == 4);
++
++/*
++ * Maximum SSH frame payload length in bytes. This is the physical maximum
++ * length of the protocol. Implementations may set a more constrained limit.
++ */
++#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
++
++/**
++ * enum ssh_payload_type - Type indicator for the SSH payload.
++ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
++ * payload.
++ */
++enum ssh_payload_type {
++ SSH_PLD_TYPE_CMD = 0x80,
++};
++
++/**
++ * struct ssh_command - Payload of a command-type frame.
++ * @type: The type of the payload. See &enum ssh_payload_type. Should be
++ * SSH_PLD_TYPE_CMD for this struct.
++ * @tc: Command target category.
++ * @chn_out: Output channel. Should be zero if this is an incoming (EC to host)
++ * message.
++ * @chn_in: Input channel. Should be zero if this is an outgoing (host to EC)
++ * message.
++ * @iid: Instance ID.
++ * @rqid: Request ID. Used to match requests with responses and differentiate
++ * between responses and events.
++ * @cid: Command ID.
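++ *
++ * On the wire, a command message consists of the SYN bytes, the frame, the
++ * frame CRC, this command structure plus any trailing command payload, and
++ * the payload CRC (see SSH_MSG_LEN_BASE below). Worked example: a command
++ * message without extra payload occupies SSH_COMMAND_MESSAGE_LENGTH(0) =
++ * 2 + 4 + 2 + 8 + 2 = 18 bytes.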
++ */
++struct ssh_command {
++ u8 type;
++ u8 tc;
++ u8 chn_out;
++ u8 chn_in;
++ u8 iid;
++ __le16 rqid;
++ u8 cid;
++} __packed;
++
++static_assert(sizeof(struct ssh_command) == 8);
++
++/*
++ * Maximum SSH command payload length in bytes. This is the physical maximum
++ * length of the protocol. Implementations may set a more constrained limit.
++ */
++#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
++ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
++
++/**
++ * struct ssh_notification_params - Command payload to enable/disable SSH
++ * notifications.
++ * @target_category: The target category for which notifications should be
++ * enabled/disabled.
++ * @flags: Flags determining how notifications are being sent.
++ * @request_id: The request ID that is used to send these notifications.
++ * @instance_id: The specific instance in the given target category for
++ * which notifications should be enabled.
++ */
++struct ssh_notification_params {
++ u8 target_category;
++ u8 flags;
++ __le16 request_id;
++ u8 instance_id;
++} __packed;
++
++static_assert(sizeof(struct ssh_notification_params) == 5);
++
++/**
++ * SSH message synchronization (SYN) bytes.
++ */
++#define SSH_MSG_SYN ((u16)0x55aa)
++
++/**
++ * Base-length of an SSH message. This is the minimum number of bytes required
++ * to form a message. The actual message length is SSH_MSG_LEN_BASE plus the
++ * length of the frame payload.
++ */
++#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
++
++/**
++ * Length of an SSH control message.
++ */
++#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
++
++/**
++ * Length of an SSH message with payload of specified size.
++ */
++#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size)
++
++/**
++ * Length of an SSH command message with command payload of specified size.
++ */
++#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
++ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size)
++
++/**
++ * Offset of the specified struct ssh_frame field in the raw SSH message data.
++ */
++#define SSH_MSGOFFSET_FRAME(field) \
++ (sizeof(u16) + offsetof(struct ssh_frame, field))
++
++/**
++ * Offset of the specified struct ssh_command field in the raw SSH message data.
++ */
++#define SSH_MSGOFFSET_COMMAND(field) \
++ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
++ + offsetof(struct ssh_command, field))
++
++/**
++ * struct ssam_span - reference to a buffer region
++ * @ptr: pointer to the buffer region
++ * @len: length of the buffer region
++ *
++ * A reference to a (non-owned) buffer segment, consisting of pointer and
++ * length. Use of this struct indicates non-owned data, i.e. data of which the
++ * life-time is managed (i.e. it is allocated/freed) via another pointer.
++ */
++struct ssam_span {
++ u8 *ptr;
++ size_t len;
++};
++
++
++/* -- Packet transport layer (ptl).
----------------------------------------- */ ++ ++enum ssh_packet_priority { ++ SSH_PACKET_PRIORITY_FLUSH = 0, ++ SSH_PACKET_PRIORITY_DATA = 0, ++ SSH_PACKET_PRIORITY_NAK = 1 << 4, ++ SSH_PACKET_PRIORITY_ACK = 2 << 4, ++}; ++ ++#define SSH_PACKET_PRIORITY(base, try) \ ++ ((SSH_PACKET_PRIORITY_##base) | ((try) & 0x0f)) ++ ++#define ssh_packet_priority_get_try(p) ((p) & 0x0f) ++ ++ ++enum ssh_packet_flags { ++ SSH_PACKET_SF_LOCKED_BIT, ++ SSH_PACKET_SF_QUEUED_BIT, ++ SSH_PACKET_SF_PENDING_BIT, ++ SSH_PACKET_SF_TRANSMITTING_BIT, ++ SSH_PACKET_SF_TRANSMITTED_BIT, ++ SSH_PACKET_SF_ACKED_BIT, ++ SSH_PACKET_SF_CANCELED_BIT, ++ SSH_PACKET_SF_COMPLETED_BIT, ++ ++ SSH_PACKET_TY_FLUSH_BIT, ++ SSH_PACKET_TY_SEQUENCED_BIT, ++ SSH_PACKET_TY_BLOCKING_BIT, ++ ++ SSH_PACKET_FLAGS_SF_MASK = ++ BIT(SSH_PACKET_SF_LOCKED_BIT) ++ | BIT(SSH_PACKET_SF_QUEUED_BIT) ++ | BIT(SSH_PACKET_SF_PENDING_BIT) ++ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT) ++ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT) ++ | BIT(SSH_PACKET_SF_ACKED_BIT) ++ | BIT(SSH_PACKET_SF_CANCELED_BIT) ++ | BIT(SSH_PACKET_SF_COMPLETED_BIT), ++ ++ SSH_PACKET_FLAGS_TY_MASK = ++ BIT(SSH_PACKET_TY_FLUSH_BIT) ++ | BIT(SSH_PACKET_TY_SEQUENCED_BIT) ++ | BIT(SSH_PACKET_TY_BLOCKING_BIT), ++}; ++ ++ ++struct ssh_ptl; ++struct ssh_packet; ++ ++struct ssh_packet_ops { ++ void (*release)(struct ssh_packet *p); ++ void (*complete)(struct ssh_packet *p, int status); ++}; ++ ++struct ssh_packet { ++ struct ssh_ptl *ptl; ++ struct kref refcnt; ++ ++ u8 priority; ++ ++ struct { ++ size_t len; ++ u8 *ptr; ++ } data; ++ ++ unsigned long state; ++ ktime_t timestamp; ++ ++ struct list_head queue_node; ++ struct list_head pending_node; ++ ++ const struct ssh_packet_ops *ops; ++}; ++ ++ ++void ssh_packet_get(struct ssh_packet *p); ++void ssh_packet_put(struct ssh_packet *p); ++ ++static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len) ++{ ++ p->data.ptr = ptr; ++ p->data.len = len; ++} ++ ++ ++/* -- Request transport layer (rtl). 
---------------------------------------- */
++
++enum ssh_request_flags {
++ SSH_REQUEST_SF_LOCKED_BIT,
++ SSH_REQUEST_SF_QUEUED_BIT,
++ SSH_REQUEST_SF_PENDING_BIT,
++ SSH_REQUEST_SF_TRANSMITTING_BIT,
++ SSH_REQUEST_SF_TRANSMITTED_BIT,
++ SSH_REQUEST_SF_RSPRCVD_BIT,
++ SSH_REQUEST_SF_CANCELED_BIT,
++ SSH_REQUEST_SF_COMPLETED_BIT,
++
++ SSH_REQUEST_TY_FLUSH_BIT,
++ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
++
++ SSH_REQUEST_FLAGS_SF_MASK =
++ BIT(SSH_REQUEST_SF_LOCKED_BIT)
++ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
++ | BIT(SSH_REQUEST_SF_PENDING_BIT)
++ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
++ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
++ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
++ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
++ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
++
++ SSH_REQUEST_FLAGS_TY_MASK =
++ BIT(SSH_REQUEST_TY_FLUSH_BIT)
++ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
++};
++
++
++struct ssh_rtl;
++struct ssh_request;
++
++struct ssh_request_ops {
++ void (*release)(struct ssh_request *rqst);
++ void (*complete)(struct ssh_request *rqst,
++ const struct ssh_command *cmd,
++ const struct ssam_span *data, int status);
++};
++
++struct ssh_request {
++ struct ssh_packet packet;
++ struct list_head node;
++
++ unsigned long state;
++ ktime_t timestamp;
++
++ const struct ssh_request_ops *ops;
++};
++
++
++static inline void ssh_request_get(struct ssh_request *r)
++{
++ ssh_packet_get(&r->packet);
++}
++
++static inline void ssh_request_put(struct ssh_request *r)
++{
++ ssh_packet_put(&r->packet);
++}
++
++static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
++{
++ ssh_packet_set_data(&r->packet, ptr, len);
++}
++
++
++/* -- Main data types and definitions --------------------------------------- */
++
++enum ssam_ssh_tc {
++ SSAM_SSH_TC_SAM = 0x01, // generic system functionality, real-time clock
++ SSAM_SSH_TC_BAT = 0x02, // battery/power subsystem
++ SSAM_SSH_TC_TMP = 0x03, // thermal subsystem
++ SSAM_SSH_TC_PMC = 0x04,
++ SSAM_SSH_TC_FAN = 0x05,
++ SSAM_SSH_TC_PoM = 0x06,
++ SSAM_SSH_TC_DBG = 0x07,
++ SSAM_SSH_TC_KBD = 0x08, // legacy keyboard (Laptop 1/2)
++ SSAM_SSH_TC_FWU = 0x09,
++ SSAM_SSH_TC_UNI = 0x0a,
++ SSAM_SSH_TC_LPC = 0x0b,
++ SSAM_SSH_TC_TCL = 0x0c,
++ SSAM_SSH_TC_SFL = 0x0d,
++ SSAM_SSH_TC_KIP = 0x0e,
++ SSAM_SSH_TC_EXT = 0x0f,
++ SSAM_SSH_TC_BLD = 0x10,
++ SSAM_SSH_TC_BAS = 0x11, // detachment system (Surface Book 2/3)
++ SSAM_SSH_TC_SEN = 0x12,
++ SSAM_SSH_TC_SRQ = 0x13,
++ SSAM_SSH_TC_MCU = 0x14,
++ SSAM_SSH_TC_HID = 0x15, // generic HID input subsystem
++ SSAM_SSH_TC_TCH = 0x16,
++ SSAM_SSH_TC_BKL = 0x17,
++ SSAM_SSH_TC_TAM = 0x18,
++ SSAM_SSH_TC_ACC = 0x19,
++ SSAM_SSH_TC_UFI = 0x1a,
++ SSAM_SSH_TC_USC = 0x1b,
++ SSAM_SSH_TC_PEN = 0x1c,
++ SSAM_SSH_TC_VID = 0x1d,
++ SSAM_SSH_TC_AUD = 0x1e,
++ SSAM_SSH_TC_SMC = 0x1f,
++ SSAM_SSH_TC_KPD = 0x20,
++ SSAM_SSH_TC_REG = 0x21,
++};
++
++struct ssam_controller;
++
++/**
++ * enum ssam_event_flags - Flags for enabling/disabling SAM-over-SSH events
++ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
++ */ ++enum ssam_event_flags { ++ SSAM_EVENT_SEQUENCED = BIT(0), ++}; ++ ++struct ssam_event { ++ u8 target_category; ++ u8 command_id; ++ u8 instance_id; ++ u8 channel; ++ u16 length; ++ u8 data[0]; ++}; ++ ++enum ssam_request_flags { ++ SSAM_REQUEST_HAS_RESPONSE = BIT(0), ++ SSAM_REQUEST_UNSEQUENCED = BIT(1), ++}; ++ ++struct ssam_request { ++ u8 target_category; ++ u8 command_id; ++ u8 instance_id; ++ u8 channel; ++ u16 flags; ++ u16 length; ++ const u8 *payload; ++}; ++ ++struct ssam_response { ++ size_t capacity; ++ size_t length; ++ u8 *pointer; ++}; ++ ++ ++int ssam_client_bind(struct device *client, struct ssam_controller **ctrl); ++ ++struct device *ssam_controller_device(struct ssam_controller *c); ++ ++ssize_t ssam_request_write_data(struct ssam_span *buf, ++ struct ssam_controller *ctrl, ++ struct ssam_request *spec); ++ ++ ++/* -- Synchronous request interface. ---------------------------------------- */ ++ ++struct ssam_request_sync { ++ struct ssh_request base; ++ struct completion comp; ++ struct ssam_response *resp; ++ int status; ++}; ++ ++int ssam_request_sync_alloc(size_t payload_len, gfp_t flags, ++ struct ssam_request_sync **rqst, ++ struct ssam_span *buffer); ++ ++void ssam_request_sync_init(struct ssam_request_sync *rqst, ++ enum ssam_request_flags flags); ++ ++static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst, ++ u8 *ptr, size_t len) ++{ ++ ssh_request_set_data(&rqst->base, ptr, len); ++} ++ ++static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst, ++ struct ssam_response *resp) ++{ ++ rqst->resp = resp; ++} ++ ++int ssam_request_sync_submit(struct ssam_controller *ctrl, ++ struct ssam_request_sync *rqst); ++ ++static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst) ++{ ++ wait_for_completion(&rqst->comp); ++ return rqst->status; ++} ++ ++int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec, ++ struct ssam_response *rsp); ++ ++int ssam_request_sync_with_buffer(struct ssam_controller *ctrl, ++ struct ssam_request *spec, ++ struct ssam_response *rsp, ++ struct ssam_span *buf); ++ ++ ++#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \ ++ ({ \ ++ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \ ++ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \ ++ int __status; \ ++ \ ++ /* ensure input does not overflow buffer */ \ ++ if ((rqst)->length <= payload_len) { \ ++ __status = ssam_request_sync_with_buffer( \ ++ ctrl, rqst, rsp, &__buf); \ ++ } else { \ ++ __status = -EINVAL; \ ++ } \ ++ \ ++ __status; \ ++ }) ++ ++ ++struct ssam_request_spec { ++ u8 target_category; ++ u8 command_id; ++ u8 instance_id; ++ u8 channel; ++ u8 flags; ++}; ++ ++struct ssam_request_spec_md { ++ u8 target_category; ++ u8 command_id; ++ u8 flags; ++}; ++ ++#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \ ++ int name(struct ssam_controller *ctrl) \ ++ { \ ++ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ ++ struct ssam_request rqst; \ ++ \ ++ rqst.target_category = s.target_category; \ ++ rqst.command_id = s.command_id; \ ++ rqst.instance_id = s.instance_id; \ ++ rqst.channel = s.channel; \ ++ rqst.flags = s.flags; \ ++ rqst.length = 0; \ ++ rqst.payload = NULL; \ ++ \ ++ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \ ++ } ++ ++#define SSAM_DEFINE_SYNC_REQUEST_W(name, wtype, spec...) 
\ ++ int name(struct ssam_controller *ctrl, const wtype *in) \ ++ { \ ++ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ ++ struct ssam_request rqst; \ ++ \ ++ rqst.target_category = s.target_category; \ ++ rqst.command_id = s.command_id; \ ++ rqst.instance_id = s.instance_id; \ ++ rqst.channel = s.channel; \ ++ rqst.flags = s.flags; \ ++ rqst.length = sizeof(wtype); \ ++ rqst.payload = (u8 *)in; \ ++ \ ++ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \ ++ sizeof(wtype)); \ ++ } ++ ++#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \ ++ int name(struct ssam_controller *ctrl, rtype *out) \ ++ { \ ++ struct ssam_request_spec s = (struct ssam_request_spec)spec; \ ++ struct ssam_request rqst; \ ++ struct ssam_response rsp; \ ++ int status; \ ++ \ ++ rqst.target_category = s.target_category; \ ++ rqst.command_id = s.command_id; \ ++ rqst.instance_id = s.instance_id; \ ++ rqst.channel = s.channel; \ ++ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ ++ rqst.length = 0; \ ++ rqst.payload = NULL; \ ++ \ ++ rsp.capacity = sizeof(rtype); \ ++ rsp.length = 0; \ ++ rsp.pointer = (u8 *)out; \ ++ \ ++ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \ ++ if (status) \ ++ return status; \ ++ \ ++ if (rsp.length != sizeof(rtype)) { \ ++ struct device *dev = ssam_controller_device(ctrl); \ ++ dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \ ++ " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \ ++ rsp.length, rqst.target_category, \ ++ rqst.command_id); \ ++ return -EIO; \ ++ } \ ++ \ ++ return 0; \ ++ } ++ ++#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, wtype, spec...) \ ++ int name(struct ssam_controller *ctrl, u8 chn, u8 iid, const wtype *in) \ ++ { \ ++ struct ssam_request_spec_md s \ ++ = (struct ssam_request_spec_md)spec; \ ++ struct ssam_request rqst; \ ++ \ ++ rqst.target_category = s.target_category; \ ++ rqst.command_id = s.command_id; \ ++ rqst.instance_id = iid; \ ++ rqst.channel = chn; \ ++ rqst.flags = s.flags; \ ++ rqst.length = sizeof(wtype); \ ++ rqst.payload = (u8 *)in; \ ++ \ ++ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \ ++ sizeof(wtype)); \ ++ } ++ ++#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \ ++ int name(struct ssam_controller *ctrl, u8 chn, u8 iid, rtype *out) \ ++ { \ ++ struct ssam_request_spec_md s \ ++ = (struct ssam_request_spec_md)spec; \ ++ struct ssam_request rqst; \ ++ struct ssam_response rsp; \ ++ int status; \ ++ \ ++ rqst.target_category = s.target_category; \ ++ rqst.command_id = s.command_id; \ ++ rqst.instance_id = iid; \ ++ rqst.channel = chn; \ ++ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ ++ rqst.length = 0; \ ++ rqst.payload = NULL; \ ++ \ ++ rsp.capacity = sizeof(rtype); \ ++ rsp.length = 0; \ ++ rsp.pointer = (u8 *)out; \ ++ \ ++ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \ ++ if (status) \ ++ return status; \ ++ \ ++ if (rsp.length != sizeof(rtype)) { \ ++ struct device *dev = ssam_controller_device(ctrl); \ ++ dev_err(dev, "rqst: invalid response length, expected %zu, got %zu" \ ++ " (tc: 0x%02x, cid: 0x%02x)", sizeof(rtype), \ ++ rsp.length, rqst.target_category, \ ++ rqst.command_id); \ ++ return -EIO; \ ++ } \ ++ \ ++ return 0; \ ++ } ++ ++ ++/* -- Event notifier/callbacks. 
--------------------------------------------- */
++
++#define SSAM_NOTIF_STATE_SHIFT 2
++#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
++
++#define SSAM_NOTIF_HANDLED BIT(0)
++#define SSAM_NOTIF_STOP BIT(1)
++
++
++struct ssam_notifier_block;
++
++typedef u32 (*ssam_notifier_fn_t)(struct ssam_notifier_block *nb,
++ const struct ssam_event *event);
++
++struct ssam_notifier_block {
++ struct ssam_notifier_block __rcu *next;
++ ssam_notifier_fn_t fn;
++ int priority;
++};
++
++
++static inline u32 ssam_notifier_from_errno(int err)
++{
++ if (WARN_ON(err > 0) || err == 0)
++ return 0;
++ else
++ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
++}
++
++static inline int ssam_notifier_to_errno(u32 ret)
++{
++ return -(ret >> SSAM_NOTIF_STATE_SHIFT);
++}
++
++
++/* -- Event/notification registry. ------------------------------------------ */
++
++struct ssam_event_registry {
++ u8 target_category;
++ u8 channel;
++ u8 cid_enable;
++ u8 cid_disable;
++};
++
++struct ssam_event_id {
++ u8 target_category;
++ u8 instance;
++};
++
++
++#define SSAM_EVENT_REGISTRY(tc, chn, cid_en, cid_dis) \
++ ((struct ssam_event_registry) { \
++ .target_category = (tc), \
++ .channel = (chn), \
++ .cid_enable = (cid_en), \
++ .cid_disable = (cid_dis), \
++ })
++
++#define SSAM_EVENT_ID(tc, iid) \
++ ((struct ssam_event_id) { \
++ .target_category = tc, \
++ .instance = iid, \
++ })
++
++
++#define SSAM_EVENT_REGISTRY_SAM \
++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
++
++#define SSAM_EVENT_REGISTRY_KIP \
++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
++
++#define SSAM_EVENT_REGISTRY_REG \
++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
++
++
++struct ssam_event_notifier {
++ struct ssam_notifier_block base;
++
++ struct {
++ struct ssam_event_registry reg;
++ struct ssam_event_id id;
++ u8 flags;
++ } event;
++};
++
++int ssam_notifier_register(struct ssam_controller *ctrl,
++ struct ssam_event_notifier *n);
++
++int ssam_notifier_unregister(struct ssam_controller *ctrl,
++ struct ssam_event_notifier *n);
++
++#endif /* _SURFACE_SAM_SSH_H */
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
+new file mode 100644
+index 0000000000000..8ea9a2fc99d7e
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h
+@@ -0,0 +1,587 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM surface_sam_ssh
++
++#if !defined(_SURFACE_SAM_SSH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _SURFACE_SAM_SSH_TRACE_H
++
++#include <linux/tracepoint.h>
++
++#include "surface_sam_ssh.h"
++
++
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
++TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
++
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
++TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
++
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT); ++ ++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT); ++ ++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK); ++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK); ++ ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG); ++ ++ ++#define SSAM_PTR_UID_LEN 9 ++#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1) ++#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1) ++#define SSAM_RQID_NOT_APPLICABLE ((u32)-1) ++#define SSAM_SSH_TC_NOT_APPLICABLE 0 ++ ++ ++#ifndef _SURFACE_SAM_SSH_TRACE_HELPERS ++#define _SURFACE_SAM_SSH_TRACE_HELPERS ++ ++static inline void ssam_trace_ptr_uid(const void *ptr, char* uid_str) ++{ ++ char buf[2 * sizeof(void*) + 1]; ++ ++ snprintf(buf, ARRAY_SIZE(buf), "%p", ptr); ++ memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN], ++ SSAM_PTR_UID_LEN); ++} ++ ++static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p) ++{ ++ if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0)) ++ return SSAM_SEQ_NOT_APPLICABLE; ++ ++ return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)]; ++} ++ ++static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p) ++{ ++ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) ++ return SSAM_RQID_NOT_APPLICABLE; ++ ++ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]); ++} ++ ++static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p) ++{ ++ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) ++ return SSAM_SSH_TC_NOT_APPLICABLE; ++ ++ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]); ++} ++ ++#endif /* _SURFACE_SAM_SSH_TRACE_HELPERS */ ++ ++#define ssam_trace_get_command_field_u8(packet, field) \ ++ ((!packet || packet->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \ ++ ? 
0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
++
++#define ssam_show_generic_u8_field(value) \
++ __print_symbolic(value, \
++ { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
++ )
++
++
++#define ssam_show_frame_type(ty) \
++ __print_symbolic(ty, \
++ { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
++ { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
++ { SSH_FRAME_TYPE_ACK, "ACK" }, \
++ { SSH_FRAME_TYPE_NAK, "NAK" } \
++ )
++
++#define ssam_show_packet_type(type) \
++ __print_flags(type & SSH_PACKET_FLAGS_TY_MASK, "", \
++ { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
++ { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
++ { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
++ )
++
++#define ssam_show_packet_state(state) \
++ __print_flags(state & SSH_PACKET_FLAGS_SF_MASK, "", \
++ { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
++ { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
++ { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
++ { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
++ { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
++ { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
++ { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
++ { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
++ )
++
++#define ssam_show_packet_seq(seq) \
++ __print_symbolic(seq, \
++ { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
++ )
++
++
++#define ssam_show_request_type(flags) \
++ __print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "", \
++ { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
++ { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
++ )
++
++#define ssam_show_request_state(flags) \
++ __print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "", \
++ { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
++ { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
++ { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
++ { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
++ { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
++ { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
++ { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
++ { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
++ )
++
++#define ssam_show_request_id(rqid) \
++ __print_symbolic(rqid, \
++ { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
++ )
++
++#define ssam_show_ssh_tc(tc) \
++ __print_symbolic(tc, \
++ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
++ { SSAM_SSH_TC_SAM, "SAM" }, \
++ { SSAM_SSH_TC_BAT, "BAT" }, \
++ { SSAM_SSH_TC_TMP, "TMP" }, \
++ { SSAM_SSH_TC_PMC, "PMC" }, \
++ { SSAM_SSH_TC_FAN, "FAN" }, \
++ { SSAM_SSH_TC_PoM, "PoM" }, \
++ { SSAM_SSH_TC_DBG, "DBG" }, \
++ { SSAM_SSH_TC_KBD, "KBD" }, \
++ { SSAM_SSH_TC_FWU, "FWU" }, \
++ { SSAM_SSH_TC_UNI, "UNI" }, \
++ { SSAM_SSH_TC_LPC, "LPC" }, \
++ { SSAM_SSH_TC_TCL, "TCL" }, \
++ { SSAM_SSH_TC_SFL, "SFL" }, \
++ { SSAM_SSH_TC_KIP, "KIP" }, \
++ { SSAM_SSH_TC_EXT, "EXT" }, \
++ { SSAM_SSH_TC_BLD, "BLD" }, \
++ { SSAM_SSH_TC_BAS, "BAS" }, \
++ { SSAM_SSH_TC_SEN, "SEN" }, \
++ { SSAM_SSH_TC_SRQ, "SRQ" }, \
++ { SSAM_SSH_TC_MCU, "MCU" }, \
++ { SSAM_SSH_TC_HID, "HID" }, \
++ { SSAM_SSH_TC_TCH, "TCH" }, \
++ { SSAM_SSH_TC_BKL, "BKL" }, \
++ { SSAM_SSH_TC_TAM, "TAM" }, \
++ { SSAM_SSH_TC_ACC, "ACC" }, \
++ { SSAM_SSH_TC_UFI, "UFI" }, \
++ { SSAM_SSH_TC_USC, "USC" }, \
++ { SSAM_SSH_TC_PEN, "PEN" }, \
++ { SSAM_SSH_TC_VID, "VID" }, \
++ { SSAM_SSH_TC_AUD, "AUD" }, \
++ { SSAM_SSH_TC_SMC, "SMC" }, \
++ { SSAM_SSH_TC_KPD, "KPD" }, \
++ { SSAM_SSH_TC_REG, "REG" } \
++ )
++
++
++DECLARE_EVENT_CLASS(ssam_frame_class,
++ TP_PROTO(const struct ssh_frame *frame),
++
++ TP_ARGS(frame),
++
++ TP_STRUCT__entry(
++ __field(u8, type)
++ __field(u8, seq)
++ __field(u16, len)
++ ),
++
++ TP_fast_assign(
++ __entry->type = frame->type;
++ __entry->seq = frame->seq;
++
__entry->len = get_unaligned_le16(&frame->len); ++ ), ++ ++ TP_printk("ty=%s, seq=0x%02x, len=%u", ++ ssam_show_frame_type(__entry->type), ++ __entry->seq, ++ __entry->len ++ ) ++); ++ ++#define DEFINE_SSAM_FRAME_EVENT(name) \ ++ DEFINE_EVENT(ssam_frame_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_frame *frame), \ ++ TP_ARGS(frame) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_command_class, ++ TP_PROTO(const struct ssh_command *cmd, u16 len), ++ ++ TP_ARGS(cmd, len), ++ ++ TP_STRUCT__entry( ++ __field(u16, rqid) ++ __field(u16, len) ++ __field(u8, tc) ++ __field(u8, cid) ++ __field(u8, iid) ++ ), ++ ++ TP_fast_assign( ++ __entry->rqid = get_unaligned_le16(&cmd->rqid); ++ __entry->tc = cmd->tc; ++ __entry->cid = cmd->cid; ++ __entry->iid = cmd->iid; ++ __entry->len = len; ++ ), ++ ++ TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u", ++ __entry->rqid, ++ ssam_show_ssh_tc(__entry->tc), ++ __entry->cid, ++ __entry->iid, ++ __entry->len ++ ) ++); ++ ++#define DEFINE_SSAM_COMMAND_EVENT(name) \ ++ DEFINE_EVENT(ssam_command_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_command *cmd, u16 len), \ ++ TP_ARGS(cmd, len) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_packet_class, ++ TP_PROTO(const struct ssh_packet *packet), ++ ++ TP_ARGS(packet), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(u8, priority) ++ __field(u16, length) ++ __field(unsigned long, state) ++ __field(u16, seq) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(packet, __entry->uid); ++ __entry->priority = READ_ONCE(packet->priority); ++ __entry->length = packet->data.len; ++ __entry->state = READ_ONCE(packet->state); ++ __entry->seq = ssam_trace_get_packet_seq(packet); ++ ), ++ ++ TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s", ++ __entry->uid, ++ ssam_show_packet_seq(__entry->seq), ++ ssam_show_packet_type(__entry->state), ++ __entry->priority, ++ __entry->length, ++ ssam_show_packet_state(__entry->state) ++ ) ++); ++ ++#define DEFINE_SSAM_PACKET_EVENT(name) \ ++ DEFINE_EVENT(ssam_packet_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_packet *packet), \ ++ TP_ARGS(packet) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_packet_status_class, ++ TP_PROTO(const struct ssh_packet *packet, int status), ++ ++ TP_ARGS(packet, status), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(u8, priority) ++ __field(u16, length) ++ __field(unsigned long, state) ++ __field(u16, seq) ++ __field(int, status) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(packet, __entry->uid); ++ __entry->priority = READ_ONCE(packet->priority); ++ __entry->length = packet->data.len; ++ __entry->state = READ_ONCE(packet->state); ++ __entry->seq = ssam_trace_get_packet_seq(packet); ++ __entry->status = status; ++ ), ++ ++ TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d", ++ __entry->uid, ++ ssam_show_packet_seq(__entry->seq), ++ ssam_show_packet_type(__entry->state), ++ __entry->priority, ++ __entry->length, ++ ssam_show_packet_state(__entry->state), ++ __entry->status ++ ) ++); ++ ++#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \ ++ DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_packet *packet, int status), \ ++ TP_ARGS(packet, status) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_request_class, ++ TP_PROTO(const struct ssh_request *request), ++ ++ TP_ARGS(request), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(unsigned long, state) ++ __field(u32, rqid) ++ __field(u8, tc) ++ __field(u16, cid) ++ 
__field(u16, iid) ++ ), ++ ++ TP_fast_assign( ++ const struct ssh_packet *p = &request->packet; ++ ++ // use packet for UID so we can match requests to packets ++ ssam_trace_ptr_uid(p, __entry->uid); ++ __entry->state = READ_ONCE(request->state); ++ __entry->rqid = ssam_trace_get_request_id(p); ++ __entry->tc = ssam_trace_get_request_tc(p); ++ __entry->cid = ssam_trace_get_command_field_u8(p, cid); ++ __entry->iid = ssam_trace_get_command_field_u8(p, iid); ++ ), ++ ++ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s", ++ __entry->uid, ++ ssam_show_request_id(__entry->rqid), ++ ssam_show_request_type(__entry->state), ++ ssam_show_request_state(__entry->state), ++ ssam_show_ssh_tc(__entry->tc), ++ ssam_show_generic_u8_field(__entry->cid), ++ ssam_show_generic_u8_field(__entry->iid) ++ ) ++); ++ ++#define DEFINE_SSAM_REQUEST_EVENT(name) \ ++ DEFINE_EVENT(ssam_request_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_request *request), \ ++ TP_ARGS(request) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_request_status_class, ++ TP_PROTO(const struct ssh_request *request, int status), ++ ++ TP_ARGS(request, status), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(unsigned long, state) ++ __field(u32, rqid) ++ __field(u8, tc) ++ __field(u16, cid) ++ __field(u16, iid) ++ __field(int, status) ++ ), ++ ++ TP_fast_assign( ++ const struct ssh_packet *p = &request->packet; ++ ++ // use packet for UID so we can match requests to packets ++ ssam_trace_ptr_uid(p, __entry->uid); ++ __entry->state = READ_ONCE(request->state); ++ __entry->rqid = ssam_trace_get_request_id(p); ++ __entry->tc = ssam_trace_get_request_tc(p); ++ __entry->cid = ssam_trace_get_command_field_u8(p, cid); ++ __entry->iid = ssam_trace_get_command_field_u8(p, iid); ++ __entry->status = status; ++ ), ++ ++ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d", ++ __entry->uid, ++ ssam_show_request_id(__entry->rqid), ++ ssam_show_request_type(__entry->state), ++ ssam_show_request_state(__entry->state), ++ ssam_show_ssh_tc(__entry->tc), ++ ssam_show_generic_u8_field(__entry->cid), ++ ssam_show_generic_u8_field(__entry->iid), ++ __entry->status ++ ) ++); ++ ++#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \ ++ DEFINE_EVENT(ssam_request_status_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_request *request, int status),\ ++ TP_ARGS(request, status) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_alloc_class, ++ TP_PROTO(void *ptr, size_t len), ++ ++ TP_ARGS(ptr, len), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(size_t, len) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(ptr, __entry->uid); ++ __entry->len = len; ++ ), ++ ++ TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len) ++); ++ ++#define DEFINE_SSAM_ALLOC_EVENT(name) \ ++ DEFINE_EVENT(ssam_alloc_class, ssam_##name, \ ++ TP_PROTO(void *ptr, size_t len), \ ++ TP_ARGS(ptr, len) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_free_class, ++ TP_PROTO(void *ptr), ++ ++ TP_ARGS(ptr), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(size_t, len) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(ptr, __entry->uid); ++ ), ++ ++ TP_printk("uid=%s", __entry->uid) ++); ++ ++#define DEFINE_SSAM_FREE_EVENT(name) \ ++ DEFINE_EVENT(ssam_free_class, ssam_##name, \ ++ TP_PROTO(void *ptr), \ ++ TP_ARGS(ptr) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_generic_uint_class, ++ TP_PROTO(const char* property, unsigned int value), ++ ++ TP_ARGS(property, value), ++ ++ TP_STRUCT__entry( ++ 
__string(property, property)
++ __field(unsigned int, value)
++ ),
++
++ TP_fast_assign(
++ __assign_str(property, property);
++ __entry->value = value;
++ ),
++
++ TP_printk("%s=%u", __get_str(property), __entry->value)
++);
++
++#define DEFINE_SSAM_GENERIC_UINT_EVENT(name) \
++ DEFINE_EVENT(ssam_generic_uint_class, ssam_##name, \
++ TP_PROTO(const char* property, unsigned int value), \
++ TP_ARGS(property, value) \
++ )
++
++
++DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
++DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
++
++DEFINE_SSAM_PACKET_EVENT(packet_release);
++DEFINE_SSAM_PACKET_EVENT(packet_submit);
++DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
++DEFINE_SSAM_PACKET_EVENT(packet_timeout);
++DEFINE_SSAM_PACKET_EVENT(packet_cancel);
++DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
++DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap);
++
++DEFINE_SSAM_REQUEST_EVENT(request_submit);
++DEFINE_SSAM_REQUEST_EVENT(request_timeout);
++DEFINE_SSAM_REQUEST_EVENT(request_cancel);
++DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
++DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap);
++
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
++DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
++DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
++DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn);
++DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
++DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
++
++DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
++DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
++
++DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
++DEFINE_SSAM_FREE_EVENT(event_item_free);
++
++#endif /* _SURFACE_SAM_SSH_TRACE_H */
++
++/* This part must be outside protection */
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE surface_sam_ssh_trace
++
++#include <trace/define_trace.h>
+diff --git a/drivers/platform/x86/surface_sam/surface_sam_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
+new file mode 100644
+index 0000000000000..8455f952c2724
+--- /dev/null
++++ b/drivers/platform/x86/surface_sam/surface_sam_vhf.c
+@@ -0,0 +1,266 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Virtual HID Framework (VHF) driver for input events via SAM.
++ * Used for keyboard input events on the Surface Laptops.
++ */
++
++#include <linux/acpi.h>
++#include <linux/hid.h>
++#include <linux/input.h>
++#include <linux/platform_device.h>
++#include <linux/types.h>
++
++#include "surface_sam_ssh.h"
++
++
++#define USB_VENDOR_ID_MICROSOFT 0x045e
++#define USB_DEVICE_ID_MS_VHF 0xf001
++
++#define VHF_INPUT_NAME "Microsoft Virtual HID Framework Device"
++
++
++struct vhf_drvdata {
++ struct platform_device *dev;
++ struct ssam_controller *ctrl;
++
++ struct ssam_event_notifier notif;
++
++ struct hid_device *hid;
++};
++
++
++/*
++ * These report descriptors have been extracted from a Surface Book 2.
++ * They seem to be similar enough to be usable on the Surface Laptop.
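++ *
++ * The keyboard descriptor reports with report ID 1, the media-key
++ * descriptor with report ID 3; the raw data of the matching SAM events
++ * (command IDs 0x03 and 0x04) is forwarded to the HID layer unmodified
++ * in vhf_event_handler() below.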
++ */ ++static const u8 vhf_hid_desc[] = { ++ // keyboard descriptor (event command ID 0x03) ++ 0x05, 0x01, /* Usage Page (Desktop), */ ++ 0x09, 0x06, /* Usage (Keyboard), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x01, /* Report ID (1), */ ++ 0x15, 0x00, /* Logical Minimum (0), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x08, /* Report Count (8), */ ++ 0x05, 0x07, /* Usage Page (Keyboard), */ ++ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */ ++ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */ ++ 0x81, 0x02, /* Input (Variable), */ ++ 0x75, 0x08, /* Report Size (8), */ ++ 0x95, 0x0A, /* Report Count (10), */ ++ 0x19, 0x00, /* Usage Minimum (None), */ ++ 0x29, 0x91, /* Usage Maximum (KB LANG2), */ ++ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ ++ 0x81, 0x00, /* Input, */ ++ 0x05, 0x0C, /* Usage Page (Consumer), */ ++ 0x0A, 0xC0, 0x02, /* Usage (02C0h), */ ++ 0xA1, 0x02, /* Collection (Logical), */ ++ 0x1A, 0xC1, 0x02, /* Usage Minimum (02C1h), */ ++ 0x2A, 0xC6, 0x02, /* Usage Maximum (02C6h), */ ++ 0x95, 0x06, /* Report Count (6), */ ++ 0xB1, 0x03, /* Feature (Constant, Variable), */ ++ 0xC0, /* End Collection, */ ++ 0x05, 0x08, /* Usage Page (LED), */ ++ 0x19, 0x01, /* Usage Minimum (01h), */ ++ 0x29, 0x03, /* Usage Maximum (03h), */ ++ 0x75, 0x01, /* Report Size (1), */ ++ 0x95, 0x03, /* Report Count (3), */ ++ 0x25, 0x01, /* Logical Maximum (1), */ ++ 0x91, 0x02, /* Output (Variable), */ ++ 0x95, 0x05, /* Report Count (5), */ ++ 0x91, 0x01, /* Output (Constant), */ ++ 0xC0, /* End Collection, */ ++ ++ // media key descriptor (event command ID 0x04) ++ 0x05, 0x0C, /* Usage Page (Consumer), */ ++ 0x09, 0x01, /* Usage (Consumer Control), */ ++ 0xA1, 0x01, /* Collection (Application), */ ++ 0x85, 0x03, /* Report ID (3), */ ++ 0x75, 0x10, /* Report Size (16), */ ++ 0x15, 0x00, /* Logical Minimum (0), */ ++ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ ++ 0x19, 0x00, /* Usage Minimum (00h), */ ++ 0x2A, 0xFF, 0x03, /* Usage Maximum (03FFh), */ ++ 0x81, 0x00, /* Input, */ ++ 0xC0, /* End Collection, */ ++}; ++ ++ ++static int vhf_hid_start(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++ return 0; ++} ++ ++static void vhf_hid_stop(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++} ++ ++static int vhf_hid_open(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++ return 0; ++} ++ ++static void vhf_hid_close(struct hid_device *hid) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++} ++ ++static int vhf_hid_parse(struct hid_device *hid) ++{ ++ return hid_parse_report(hid, (u8 *)vhf_hid_desc, ARRAY_SIZE(vhf_hid_desc)); ++} ++ ++static int vhf_hid_raw_request(struct hid_device *hid, unsigned char reportnum, ++ u8 *buf, size_t len, unsigned char rtype, ++ int reqtype) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++ return 0; ++} ++ ++static int vhf_hid_output_report(struct hid_device *hid, u8 *buf, size_t len) ++{ ++ hid_dbg(hid, "%s\n", __func__); ++ print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); ++ ++ return len; ++} ++ ++static struct hid_ll_driver vhf_hid_ll_driver = { ++ .start = vhf_hid_start, ++ .stop = vhf_hid_stop, ++ .open = vhf_hid_open, ++ .close = vhf_hid_close, ++ .parse = vhf_hid_parse, ++ .raw_request = vhf_hid_raw_request, ++ .output_report = vhf_hid_output_report, ++}; ++ ++ ++static struct hid_device *vhf_create_hid_device(struct platform_device *pdev) ++{ ++ struct hid_device *hid; ++ ++ hid = hid_allocate_device(); ++ if (IS_ERR(hid)) ++ return hid; 
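++
++ // (hid_allocate_device() returns an ERR_PTR() on failure, which is
++ // handed through to the caller above.)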
++ ++ hid->dev.parent = &pdev->dev; ++ ++ hid->bus = BUS_VIRTUAL; ++ hid->vendor = USB_VENDOR_ID_MICROSOFT; ++ hid->product = USB_DEVICE_ID_MS_VHF; ++ ++ hid->ll_driver = &vhf_hid_ll_driver; ++ ++ sprintf(hid->name, "%s", VHF_INPUT_NAME); ++ ++ return hid; ++} ++ ++static u32 vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct vhf_drvdata *drvdata = container_of(nb, struct vhf_drvdata, notif.base); ++ int status; ++ ++ if (event->target_category != 0x08) ++ return 0; ++ ++ if (event->command_id == 0x03 || event->command_id == 0x04) { ++ status = hid_input_report(drvdata->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 1); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++ } ++ ++ return 0; ++} ++ ++static int surface_sam_vhf_probe(struct platform_device *pdev) ++{ ++ struct ssam_controller *ctrl; ++ struct vhf_drvdata *drvdata; ++ struct hid_device *hid; ++ int status; ++ ++ // add device link to EC ++ status = ssam_client_bind(&pdev->dev, &ctrl); ++ if (status) ++ return status == -ENXIO ? -EPROBE_DEFER : status; ++ ++ drvdata = kzalloc(sizeof(struct vhf_drvdata), GFP_KERNEL); ++ if (!drvdata) ++ return -ENOMEM; ++ ++ hid = vhf_create_hid_device(pdev); ++ if (IS_ERR(hid)) { ++ status = PTR_ERR(hid); ++ goto err_probe_hid; ++ } ++ ++ status = hid_add_device(hid); ++ if (status) ++ goto err_add_hid; ++ ++ drvdata->dev = pdev; ++ drvdata->ctrl = ctrl; ++ drvdata->hid = hid; ++ ++ drvdata->notif.base.priority = 1; ++ drvdata->notif.base.fn = vhf_event_handler; ++ drvdata->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ drvdata->notif.event.id.target_category = SSAM_SSH_TC_KBD; ++ drvdata->notif.event.id.instance = 0; ++ drvdata->notif.event.flags = 0; ++ ++ platform_set_drvdata(pdev, drvdata); ++ ++ status = ssam_notifier_register(ctrl, &drvdata->notif); ++ if (status) ++ goto err_add_hid; ++ ++ return 0; ++ ++err_add_hid: ++ hid_destroy_device(hid); ++ platform_set_drvdata(pdev, NULL); ++err_probe_hid: ++ kfree(drvdata); ++ return status; ++} ++ ++static int surface_sam_vhf_remove(struct platform_device *pdev) ++{ ++ struct vhf_drvdata *drvdata = platform_get_drvdata(pdev); ++ ++ ssam_notifier_unregister(drvdata->ctrl, &drvdata->notif); ++ hid_destroy_device(drvdata->hid); ++ kfree(drvdata); ++ ++ platform_set_drvdata(pdev, NULL); ++ return 0; ++} ++ ++ ++static const struct acpi_device_id surface_sam_vhf_match[] = { ++ { "MSHW0096" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(acpi, surface_sam_vhf_match); ++ ++static struct platform_driver surface_sam_vhf = { ++ .probe = surface_sam_vhf_probe, ++ .remove = surface_sam_vhf_remove, ++ .driver = { ++ .name = "surface_sam_vhf", ++ .acpi_match_table = surface_sam_vhf_match, ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ }, ++}; ++module_platform_driver(surface_sam_vhf); ++ ++MODULE_AUTHOR("Maximilian Luz "); ++MODULE_DESCRIPTION("Virtual HID Framework Driver for 5th Generation Surface Devices"); ++MODULE_LICENSE("GPL"); +-- +2.28.0 + diff --git a/patches/5.8/0003-surface-sam-over-hid.patch b/patches/5.8/0003-surface-sam-over-hid.patch new file mode 100644 index 000000000..47465fbd0 --- /dev/null +++ b/patches/5.8/0003-surface-sam-over-hid.patch @@ -0,0 +1,65 @@ +From 4d82adb6c864e715ea5236dd0a8c971cf63e2dc1 Mon Sep 17 00:00:00 2001 +From: Maximilian Luz +Date: Sat, 25 Jul 2020 17:19:53 +0200 +Subject: [PATCH 3/5] surface-sam-over-hid + +--- + drivers/i2c/i2c-core-acpi.c | 35 +++++++++++++++++++++++++++++++++++ + 1 file changed, 35 insertions(+) + +diff --git 
a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index 2ade99b105b91..60b9cb51d5f7e 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -574,6 +574,28 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ 	return (ret == 1) ? 0 : -EIO;
+ }
+ 
++static int acpi_gsb_i2c_write_raw_bytes(struct i2c_client *client,
++		u8 *data, u8 data_len)
++{
++	struct i2c_msg msgs[1];
++	int ret = AE_OK;
++
++	msgs[0].addr = client->addr;
++	msgs[0].flags = client->flags;
++	msgs[0].len = data_len + 1;
++	msgs[0].buf = data;
++
++	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++
++	if (ret < 0) {
++		dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
++		return ret;
++	}
++
++	/* 1 transfer must have completed successfully */
++	return (ret == 1) ? 0 : -EIO;
++}
++
+ static acpi_status
+ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		       u32 bits, u64 *value64,
+@@ -675,6 +697,19 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		}
+ 		break;
+ 
++	case ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES:
++		if (action == ACPI_READ) {
++			dev_warn(&adapter->dev,
++				 "protocol 0x%02x not supported for client 0x%02x\n",
++				 accessor_type, client->addr);
++			ret = AE_BAD_PARAMETER;
++			goto err;
++		} else {
++			status = acpi_gsb_i2c_write_raw_bytes(client,
++					gsb->data, info->access_length);
++		}
++		break;
++
+ 	default:
+ 		dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n",
+ 			 accessor_type, client->addr);
+-- 
+2.28.0
+
diff --git a/patches/5.8/0004-wifi.patch b/patches/5.8/0004-wifi.patch
new file mode 100644
index 000000000..e0523de54
--- /dev/null
+++ b/patches/5.8/0004-wifi.patch
@@ -0,0 +1,255 @@
+From e20265c4b4f99cc46dfb5e481dff4a2be2ed3fd5 Mon Sep 17 00:00:00 2001
+From: kitakar5525 <34676735+kitakar5525@users.noreply.github.com>
+Date: Thu, 20 Feb 2020 16:51:11 +0900
+Subject: [PATCH 4/5] wifi
+
+---
+ .../net/wireless/marvell/mwifiex/cfg80211.c   | 26 ++++++
+ drivers/net/wireless/marvell/mwifiex/pcie.c   | 84 +++++++++++--------
+ .../net/wireless/marvell/mwifiex/sta_cmd.c    | 31 ++-----
+ 3 files changed, 84 insertions(+), 57 deletions(-)
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index 4e4f59c17ded3..528eedfbf41c9 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -25,6 +25,11 @@
+ static char *reg_alpha2;
+ module_param(reg_alpha2, charp, 0);
+ 
++static bool allow_ps_mode;
++module_param(allow_ps_mode, bool, 0444);
++MODULE_PARM_DESC(allow_ps_mode,
++		 "allow WiFi power management to be enabled. (default: disallowed)");
++
+ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
+ 	{
+ 		.max = 3, .types = BIT(NL80211_IFTYPE_STATION) |
+@@ -434,6 +439,27 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ 
+ 	ps_mode = enabled;
+ 
++	/* Allow ps_mode to be enabled only when allow_ps_mode is set
++	 * (but always allow ps_mode to be disabled, in case it got enabled
++	 * for an unknown reason and you want to turn it off) */
++	if (ps_mode && !allow_ps_mode) {
++		dev_info(priv->adapter->dev,
++			 "Request to enable ps_mode received but it's disallowed "
++			 "by the module parameter. Rejecting the request.\n");
++
++		/* Return a negative value to inform userspace tools that
++		 * enabling power_save is not permitted. */
++		return -1;
++	}
++
++	if (ps_mode)
++		dev_warn(priv->adapter->dev,
++			 "WARN: Request to enable ps_mode received. 
Enabling it. "
++			 "Disable it if you encounter connection instability.\n");
++	else
++		dev_info(priv->adapter->dev,
++			 "Request to disable ps_mode received. Disabling it.\n");
++
+ 	return mwifiex_drv_set_power(priv, &ps_mode);
+ }
+ 
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 87b4ccca4b9a2..3bdad5e80ecbb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -146,38 +146,45 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
+  *
+  * If already not suspended, this function allocates and sends a host
+  * sleep activate request to the firmware and turns off the traffic.
++ *
++ * XXX: Ignoring all of the above, this simply removes the card to
++ * fix S0ix and "AP scanning (sometimes) not working after suspend".
++ * The required code is extracted from mwifiex_pcie_remove().
+  */
+ static int mwifiex_pcie_suspend(struct device *dev)
+ {
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct pcie_service_card *card = pci_get_drvdata(pdev);
+ 	struct mwifiex_adapter *adapter;
+-	struct pcie_service_card *card = dev_get_drvdata(dev);
+-
++	struct mwifiex_private *priv;
++	const struct mwifiex_pcie_card_reg *reg;
++	u32 fw_status;
++	int ret;
+ 
+ 	/* Might still be loading firmware */
+ 	wait_for_completion(&card->fw_done);
+ 
+ 	adapter = card->adapter;
+-	if (!adapter) {
+-		dev_err(dev, "adapter is not valid\n");
++	if (!adapter || !adapter->priv_num)
+ 		return 0;
+-	}
+ 
+-	mwifiex_enable_wake(adapter);
++	reg = card->pcie.reg;
++	if (reg)
++		ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
++	else
++		fw_status = -1;
+ 
+-	/* Enable the Host Sleep */
+-	if (!mwifiex_enable_hs(adapter)) {
+-		mwifiex_dbg(adapter, ERROR,
+-			    "cmd: failed to suspend\n");
+-		clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
+-		mwifiex_disable_wake(adapter);
+-		return -EFAULT;
+-	}
++	if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) {
++		mwifiex_deauthenticate_all(adapter);
+ 
+-	flush_workqueue(adapter->workqueue);
++		priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
++
++		mwifiex_disable_auto_ds(priv);
+ 
+-	/* Indicate device suspended */
+-	set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
+-	clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags);
++		mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
++	}
++
++	mwifiex_remove_card(adapter);
+ 
+ 	return 0;
+ }
+@@ -189,31 +196,35 @@ static int mwifiex_pcie_suspend(struct device *dev)
+  *
+  * If already not resumed, this function turns on the traffic and
+  * sends a host sleep cancel request to the firmware.
++ *
++ * XXX: Ignoring all of the above, this re-probes the card that was
++ * removed on suspend. The required code is extracted from
++ * mwifiex_pcie_probe().
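++ * Note that this amounts to a full re-initialization of the card on
++ * every resume, including another firmware download.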
+ */ + static int mwifiex_pcie_resume(struct device *dev) + { +- struct mwifiex_adapter *adapter; +- struct pcie_service_card *card = dev_get_drvdata(dev); ++ struct pci_dev *pdev = to_pci_dev(dev); ++ struct pcie_service_card *card = pci_get_drvdata(pdev); ++ int ret; + ++ pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", ++ pdev->vendor, pdev->device, pdev->revision); + +- if (!card->adapter) { +- dev_err(dev, "adapter structure is not valid\n"); +- return 0; +- } ++ init_completion(&card->fw_done); + +- adapter = card->adapter; ++ card->dev = pdev; + +- if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { +- mwifiex_dbg(adapter, WARN, +- "Device already resumed\n"); +- return 0; ++ /* device tree node parsing and platform specific configuration */ ++ if (pdev->dev.of_node) { ++ ret = mwifiex_pcie_probe_of(&pdev->dev); ++ if (ret) ++ return ret; + } + +- clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); +- +- mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), +- MWIFIEX_ASYNC_CMD); +- mwifiex_disable_wake(adapter); ++ if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, ++ MWIFIEX_PCIE, &pdev->dev)) { ++ pr_err("%s failed\n", __func__); ++ return -1; ++ } + + return 0; + } +@@ -229,8 +240,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct pcie_service_card *card; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + ++ /* disable bridge_d3 to fix driver crashing after suspend on gen4+ ++ * Surface devices */ ++ parent_pdev->bridge_d3 = false; ++ + pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", + pdev->vendor, pdev->device, pdev->revision); + +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +index 8bd355d7974e9..256c8c38deee3 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +@@ -2247,7 +2247,6 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, + * - Function init (for first interface only) + * - Read MAC address (for first interface only) + * - Reconfigure Tx buffer size (for first interface only) +- * - Enable auto deep sleep (for first interface only) + * - Get Tx rate + * - Get Tx power + * - Set IBSS coalescing status +@@ -2260,7 +2259,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + struct mwifiex_adapter *adapter = priv->adapter; + int ret; + struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; +- struct mwifiex_ds_auto_ds auto_ds; + enum state_11d_t state_11d; + struct mwifiex_ds_11n_tx_cfg tx_cfg; + u8 sdio_sp_rx_aggr_enable; +@@ -2332,16 +2330,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + if (ret) + return -1; + +- if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { +- /* Enable IEEE PS by default */ +- priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; +- ret = mwifiex_send_cmd(priv, +- HostCmd_CMD_802_11_PS_MODE_ENH, +- EN_AUTO_PS, BITMAP_STA_PS, NULL, +- true); +- if (ret) +- return -1; +- } ++ /* Not enabling ps_mode (IEEE power_save) by default. Enabling ++ * this causes connection instability, especially on 5GHz APs ++ * and eventually causes "firmware wakeup failed". Therefore, ++ * the relevant code was removed from here. 
*/ + + if (drcs) { + adapter->drcs_enabled = true; +@@ -2388,17 +2380,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + if (ret) + return -1; + +- if (!disable_auto_ds && first_sta && +- priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { +- /* Enable auto deep sleep */ +- auto_ds.auto_ds = DEEP_SLEEP_ON; +- auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; +- ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, +- EN_AUTO_PS, BITMAP_AUTO_DS, +- &auto_ds, true); +- if (ret) +- return -1; +- } ++ /* Not enabling auto deep sleep (auto_ds) by default. Enabling ++ * this reportedly causes "suspend/resume fails when not connected ++ * to an Access Point." Therefore, the relevant code was removed ++ * from here. */ + + if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { + /* Send cmd to FW to enable/disable 11D function */ +-- +2.28.0 + diff --git a/patches/5.8/0005-ipts.patch b/patches/5.8/0005-ipts.patch new file mode 100644 index 000000000..9a09f69a8 --- /dev/null +++ b/patches/5.8/0005-ipts.patch @@ -0,0 +1,2061 @@ +From aeb41df3109a21b984abdbbbb213ca802ffb6a26 Mon Sep 17 00:00:00 2001 +From: Dorian Stoll +Date: Mon, 27 Jan 2020 21:16:20 +0100 +Subject: [PATCH 5/5] ipts + +--- + drivers/input/touchscreen/Kconfig | 2 + + drivers/input/touchscreen/Makefile | 1 + + drivers/input/touchscreen/ipts/Kconfig | 16 ++ + drivers/input/touchscreen/ipts/Makefile | 17 ++ + drivers/input/touchscreen/ipts/context.h | 60 ++++ + drivers/input/touchscreen/ipts/control.c | 94 +++++++ + drivers/input/touchscreen/ipts/control.h | 18 ++ + drivers/input/touchscreen/ipts/data.c | 107 +++++++ + drivers/input/touchscreen/ipts/data.h | 12 + + drivers/input/touchscreen/ipts/hid.c | 38 +++ + drivers/input/touchscreen/ipts/hid.h | 13 + + drivers/input/touchscreen/ipts/init.c | 93 ++++++ + drivers/input/touchscreen/ipts/math.c | 103 +++++++ + drivers/input/touchscreen/ipts/math.h | 21 ++ + drivers/input/touchscreen/ipts/params.c | 27 ++ + drivers/input/touchscreen/ipts/params.h | 15 + + drivers/input/touchscreen/ipts/payload.c | 52 ++++ + drivers/input/touchscreen/ipts/payload.h | 14 + + .../touchscreen/ipts/protocol/commands.h | 61 ++++ + .../input/touchscreen/ipts/protocol/data.h | 30 ++ + .../input/touchscreen/ipts/protocol/events.h | 29 ++ + .../touchscreen/ipts/protocol/feedback.h | 30 ++ + .../input/touchscreen/ipts/protocol/payload.h | 47 ++++ + .../touchscreen/ipts/protocol/responses.h | 62 ++++ + .../touchscreen/ipts/protocol/singletouch.h | 17 ++ + .../input/touchscreen/ipts/protocol/stylus.h | 52 ++++ + drivers/input/touchscreen/ipts/receiver.c | 265 ++++++++++++++++++ + drivers/input/touchscreen/ipts/receiver.h | 8 + + drivers/input/touchscreen/ipts/resources.c | 131 +++++++++ + drivers/input/touchscreen/ipts/resources.h | 11 + + drivers/input/touchscreen/ipts/singletouch.c | 64 +++++ + drivers/input/touchscreen/ipts/singletouch.h | 14 + + drivers/input/touchscreen/ipts/stylus.c | 179 ++++++++++++ + drivers/input/touchscreen/ipts/stylus.h | 14 + + drivers/misc/mei/hw-me-regs.h | 2 + + drivers/misc/mei/pci-me.c | 2 + + include/uapi/linux/input.h | 1 + + 37 files changed, 1722 insertions(+) + create mode 100644 drivers/input/touchscreen/ipts/Kconfig + create mode 100644 drivers/input/touchscreen/ipts/Makefile + create mode 100644 drivers/input/touchscreen/ipts/context.h + create mode 100644 drivers/input/touchscreen/ipts/control.c + create mode 100644 drivers/input/touchscreen/ipts/control.h + create mode 100644 drivers/input/touchscreen/ipts/data.c + create mode 100644 
drivers/input/touchscreen/ipts/data.h
+ create mode 100644 drivers/input/touchscreen/ipts/hid.c
+ create mode 100644 drivers/input/touchscreen/ipts/hid.h
+ create mode 100644 drivers/input/touchscreen/ipts/init.c
+ create mode 100644 drivers/input/touchscreen/ipts/math.c
+ create mode 100644 drivers/input/touchscreen/ipts/math.h
+ create mode 100644 drivers/input/touchscreen/ipts/params.c
+ create mode 100644 drivers/input/touchscreen/ipts/params.h
+ create mode 100644 drivers/input/touchscreen/ipts/payload.c
+ create mode 100644 drivers/input/touchscreen/ipts/payload.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/commands.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/data.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/events.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/feedback.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/payload.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/responses.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/singletouch.h
+ create mode 100644 drivers/input/touchscreen/ipts/protocol/stylus.h
+ create mode 100644 drivers/input/touchscreen/ipts/receiver.c
+ create mode 100644 drivers/input/touchscreen/ipts/receiver.h
+ create mode 100644 drivers/input/touchscreen/ipts/resources.c
+ create mode 100644 drivers/input/touchscreen/ipts/resources.h
+ create mode 100644 drivers/input/touchscreen/ipts/singletouch.c
+ create mode 100644 drivers/input/touchscreen/ipts/singletouch.h
+ create mode 100644 drivers/input/touchscreen/ipts/stylus.c
+ create mode 100644 drivers/input/touchscreen/ipts/stylus.h
+
+diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
+index 35c867b2d9a77..a1fd8b293a367 100644
+--- a/drivers/input/touchscreen/Kconfig
++++ b/drivers/input/touchscreen/Kconfig
+@@ -1322,4 +1322,6 @@ config TOUCHSCREEN_IQS5XX
+ 	  To compile this driver as a module, choose M here: the
+ 	  module will be called iqs5xx.
+ 
++source "drivers/input/touchscreen/ipts/Kconfig"
++
+ endif
+diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
+index 30d1e1b424929..517e86dd20840 100644
+--- a/drivers/input/touchscreen/Makefile
++++ b/drivers/input/touchscreen/Makefile
+@@ -46,6 +46,7 @@ obj-$(CONFIG_TOUCHSCREEN_EXC3000)	+= exc3000.o
+ obj-$(CONFIG_TOUCHSCREEN_FUJITSU)	+= fujitsu_ts.o
+ obj-$(CONFIG_TOUCHSCREEN_GOODIX)	+= goodix.o
+ obj-$(CONFIG_TOUCHSCREEN_HIDEEP)	+= hideep.o
++obj-$(CONFIG_TOUCHSCREEN_IPTS)		+= ipts/
+ obj-$(CONFIG_TOUCHSCREEN_ILI210X)	+= ili210x.o
+ obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC)	+= imx6ul_tsc.o
+ obj-$(CONFIG_TOUCHSCREEN_INEXIO)	+= inexio.o
+diff --git a/drivers/input/touchscreen/ipts/Kconfig b/drivers/input/touchscreen/ipts/Kconfig
+new file mode 100644
+index 0000000000000..d3c530dafa948
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/Kconfig
+@@ -0,0 +1,16 @@
++# SPDX-License-Identifier: GPL-2.0-or-later
++
++config TOUCHSCREEN_IPTS
++	tristate "Intel Precise Touch & Stylus"
++	select INTEL_MEI
++	depends on X86
++	depends on PCI
++	depends on HID
++	help
++	  Say Y here if your system has a touchscreen using Intel's
++	  Precise Touch & Stylus (IPTS).
++
++	  If unsure, say N.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called ipts.
+diff --git a/drivers/input/touchscreen/ipts/Makefile b/drivers/input/touchscreen/ipts/Makefile +new file mode 100644 +index 0000000000000..0f7c904e73171 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/Makefile +@@ -0,0 +1,17 @@ ++# SPDX-License-Identifier: GPL-2.0-or-later ++# ++# Makefile for the IPTS touchscreen driver ++# ++ ++obj-$(CONFIG_TOUCHSCREEN_IPTS) += ipts.o ++ipts-objs := control.o ++ipts-objs += data.o ++ipts-objs += hid.o ++ipts-objs += init.o ++ipts-objs += math.o ++ipts-objs += params.o ++ipts-objs += payload.o ++ipts-objs += receiver.o ++ipts-objs += resources.o ++ipts-objs += singletouch.o ++ipts-objs += stylus.o +diff --git a/drivers/input/touchscreen/ipts/context.h b/drivers/input/touchscreen/ipts/context.h +new file mode 100644 +index 0000000000000..ab26552579a5c +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/context.h +@@ -0,0 +1,60 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_CONTEXT_H_ ++#define _IPTS_CONTEXT_H_ ++ ++#include ++#include ++#include ++#include ++ ++#include "protocol/commands.h" ++#include "protocol/responses.h" ++ ++/* HACK: Workaround for DKMS build without BUS_MEI patch */ ++#ifndef BUS_MEI ++#define BUS_MEI 0x44 ++#endif ++ ++/* IPTS driver states */ ++enum ipts_host_status { ++ IPTS_HOST_STATUS_NONE, ++ IPTS_HOST_STATUS_INIT, ++ IPTS_HOST_STATUS_RESOURCE_READY, ++ IPTS_HOST_STATUS_STARTED, ++ IPTS_HOST_STATUS_STOPPING, ++ IPTS_HOST_STATUS_RESTARTING ++}; ++ ++struct ipts_buffer_info { ++ u8 *address; ++ dma_addr_t dma_address; ++}; ++ ++struct ipts_context { ++ struct mei_cl_device *client_dev; ++ struct device *dev; ++ struct ipts_device_info device_info; ++ ++ enum ipts_host_status status; ++ enum ipts_sensor_mode mode; ++ ++ struct ipts_buffer_info data[16]; ++ struct ipts_buffer_info feedback[16]; ++ struct ipts_buffer_info doorbell; ++ ++ /* ++ * These buffers are not actually used by anything, but they need ++ * to be allocated and passed to the ME to get proper functionality. 
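++	 * This applies to the workqueue and host2me buffers declared below.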
++ */ ++ struct ipts_buffer_info workqueue; ++ struct ipts_buffer_info host2me; ++ ++ struct task_struct *receiver_loop; ++ struct task_struct *data_loop; ++ ++ struct input_dev *stylus; ++ struct input_dev *singletouch; ++}; ++ ++#endif /* _IPTS_CONTEXT_H_ */ +diff --git a/drivers/input/touchscreen/ipts/control.c b/drivers/input/touchscreen/ipts/control.c +new file mode 100644 +index 0000000000000..9179eca665585 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/control.c +@@ -0,0 +1,94 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include ++#include ++ ++#include "context.h" ++#include "data.h" ++#include "params.h" ++#include "protocol/commands.h" ++#include "protocol/events.h" ++#include "protocol/feedback.h" ++#include "resources.h" ++ ++int ipts_control_send(struct ipts_context *ipts, ++ u32 cmd, void *data, u32 size) ++{ ++ int ret; ++ struct ipts_command msg; ++ ++ memset(&msg, 0, sizeof(struct ipts_command)); ++ msg.code = cmd; ++ ++ // Copy message payload ++ if (data && size > 0) ++ memcpy(&msg.data, data, size); ++ ++ ret = mei_cldev_send(ipts->client_dev, (u8 *)&msg, ++ sizeof(msg.code) + size); ++ if (ret < 0) { ++ dev_err(ipts->dev, "%s: error 0x%X:%d\n", __func__, cmd, ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ipts_control_send_feedback(struct ipts_context *ipts, ++ u32 buffer, u32 transaction) ++{ ++ struct ipts_buffer_info feedback_buffer; ++ struct ipts_feedback *feedback; ++ struct ipts_feedback_cmd cmd; ++ ++ feedback_buffer = ipts->feedback[buffer]; ++ feedback = (struct ipts_feedback *)feedback_buffer.address; ++ ++ memset(feedback, 0, sizeof(struct ipts_feedback)); ++ memset(&cmd, 0, sizeof(struct ipts_feedback_cmd)); ++ ++ feedback->type = IPTS_FEEDBACK_TYPE_NONE; ++ feedback->transaction = transaction; ++ ++ cmd.buffer = buffer; ++ cmd.transaction = transaction; ++ ++ return ipts_control_send(ipts, IPTS_CMD(FEEDBACK), ++ &cmd, sizeof(struct ipts_feedback_cmd)); ++} ++ ++int ipts_control_start(struct ipts_context *ipts) ++{ ++ ipts->status = IPTS_HOST_STATUS_INIT; ++ ++ if (ipts_params.singletouch) ++ ipts->mode = IPTS_SENSOR_MODE_SINGLETOUCH; ++ else ++ ipts->mode = IPTS_SENSOR_MODE_MULTITOUCH; ++ ++ return ipts_control_send(ipts, IPTS_CMD(NOTIFY_DEV_READY), NULL, 0); ++} ++ ++void ipts_control_stop(struct ipts_context *ipts) ++{ ++ enum ipts_host_status old_status = ipts->status; ++ ++ ipts->status = IPTS_HOST_STATUS_STOPPING; ++ ipts_control_send(ipts, IPTS_CMD(QUIESCE_IO), NULL, 0); ++ ipts_control_send(ipts, IPTS_CMD(CLEAR_MEM_WINDOW), NULL, 0); ++ ++ if (old_status < IPTS_HOST_STATUS_RESOURCE_READY) ++ return; ++ ++ ipts_data_free(ipts); ++ ipts_resources_free(ipts); ++} ++ ++int ipts_control_restart(struct ipts_context *ipts) ++{ ++ dev_info(ipts->dev, "Restarting IPTS\n"); ++ ipts_control_stop(ipts); ++ ++ ipts->status = IPTS_HOST_STATUS_RESTARTING; ++ return ipts_control_send(ipts, IPTS_CMD(QUIESCE_IO), NULL, 0); ++} +diff --git a/drivers/input/touchscreen/ipts/control.h b/drivers/input/touchscreen/ipts/control.h +new file mode 100644 +index 0000000000000..e57609c85d62a +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/control.h +@@ -0,0 +1,18 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_CONTROL_H_ ++#define _IPTS_CONTROL_H_ ++ ++#include ++ ++#include "context.h" ++ ++int ipts_control_start(struct ipts_context *ipts); ++void ipts_control_stop(struct ipts_context *ipts); ++int ipts_control_restart(struct ipts_context *ipts); ++int ipts_control_send(struct ipts_context *ipts, ++ u32 cmd, void *data, u32 
size);
++int ipts_control_send_feedback(struct ipts_context *ipts,
++		u32 buffer, u32 transaction);
++
++#endif /* _IPTS_CONTROL_H_ */
+diff --git a/drivers/input/touchscreen/ipts/data.c b/drivers/input/touchscreen/ipts/data.c
+new file mode 100644
+index 0000000000000..568bf04f7ea6e
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/data.c
+@@ -0,0 +1,107 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/delay.h>
++#include <linux/kthread.h>
++#include <linux/ktime.h>
++
++#include "context.h"
++#include "control.h"
++#include "hid.h"
++#include "params.h"
++#include "payload.h"
++#include "protocol/data.h"
++
++static void ipts_data_handle_input(struct ipts_context *ipts, int buffer_id)
++{
++	struct ipts_buffer_info buffer;
++	struct ipts_data *data;
++
++	buffer = ipts->data[buffer_id];
++	data = (struct ipts_data *)buffer.address;
++
++	if (ipts_params.debug) {
++		dev_info(ipts->dev, "Buffer %d\n", buffer_id);
++		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 32, 1,
++				data->data, data->size, false);
++	}
++
++	switch (data->type) {
++	case IPTS_DATA_TYPE_PAYLOAD:
++		ipts_payload_handle_input(ipts, data);
++		break;
++	case IPTS_DATA_TYPE_HID_REPORT:
++		ipts_hid_handle_input(ipts, data);
++		break;
++	default:
++		// ignore
++		break;
++	}
++
++	ipts_control_send_feedback(ipts, buffer_id, data->transaction);
++}
++
++int ipts_data_loop(void *data)
++{
++	time64_t timeout;
++	u32 doorbell;
++	u32 last_doorbell;
++	struct ipts_context *ipts;
++
++	timeout = ktime_get_seconds() + 5;
++	ipts = (struct ipts_context *)data;
++	last_doorbell = 0;
++	doorbell = 0;
++
++	dev_info(ipts->dev, "Starting data loop\n");
++
++	while (!kthread_should_stop()) {
++		if (ipts->status != IPTS_HOST_STATUS_STARTED) {
++			msleep(1000);
++			continue;
++		}
++
++		// IPTS will increment the doorbell after it filled up one of
++		// the data buffers. If the doorbell didn't change, there is
++		// no work for us to do. Otherwise, the value of the doorbell
++		// will stand for the *next* buffer that's going to be filled.
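++		// The doorbell is a free-running counter while the buffers
++		// form a ring of 16, hence the "% 16" when the counter is
++		// used as a buffer index below.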
++		doorbell = *(u32 *)ipts->doorbell.address;
++		if (doorbell == last_doorbell)
++			goto sleep;
++
++		timeout = ktime_get_seconds() + 5;
++
++		while (last_doorbell != doorbell) {
++			ipts_data_handle_input(ipts, last_doorbell % 16);
++			last_doorbell++;
++		}
++sleep:
++		if (timeout > ktime_get_seconds())
++			usleep_range(5000, 30000);
++		else
++			msleep(200);
++	}
++
++	dev_info(ipts->dev, "Stopping data loop\n");
++	return 0;
++}
++
++int ipts_data_init(struct ipts_context *ipts)
++{
++	int ret;
++
++	ret = ipts_payload_init(ipts);
++	if (ret)
++		return ret;
++
++	ret = ipts_hid_init(ipts);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++void ipts_data_free(struct ipts_context *ipts)
++{
++	ipts_payload_free(ipts);
++	ipts_hid_free(ipts);
++}
+diff --git a/drivers/input/touchscreen/ipts/data.h b/drivers/input/touchscreen/ipts/data.h
+new file mode 100644
+index 0000000000000..fa72c1be09451
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/data.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_DATA_H_
++#define _IPTS_DATA_H_
++
++#include "context.h"
++
++int ipts_data_loop(void *data);
++int ipts_data_init(struct ipts_context *ipts);
++void ipts_data_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_DATA_H_ */
+diff --git a/drivers/input/touchscreen/ipts/hid.c b/drivers/input/touchscreen/ipts/hid.c
+new file mode 100644
+index 0000000000000..2642990b8c420
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/hid.c
+@@ -0,0 +1,38 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include "context.h"
++#include "protocol/data.h"
++#include "singletouch.h"
++
++/*
++ * IPTS on surface gen7 appears to make heavy use of HID reports, unlike
++ * previous generations. This file can be used to implement handling for
++ * them in the future, separated from the actual singletouch implementation.
++ */
++
++void ipts_hid_handle_input(struct ipts_context *ipts, struct ipts_data *data)
++{
++	// Make sure that we only handle singletouch inputs.
++	// 0x40 is the report id of the singletouch device in the generic
++	// IPTS HID descriptor.
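++	// Everything else (e.g. the gen7 multitouch reports mentioned
++	// above) is ignored until proper handling for them exists.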
++	if (data->data[0] != 0x40)
++		return;
++
++	ipts_singletouch_handle_input(ipts, data);
++}
++
++int ipts_hid_init(struct ipts_context *ipts)
++{
++	int ret;
++
++	ret = ipts_singletouch_init(ipts);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++void ipts_hid_free(struct ipts_context *ipts)
++{
++	ipts_singletouch_free(ipts);
++}
+diff --git a/drivers/input/touchscreen/ipts/hid.h b/drivers/input/touchscreen/ipts/hid.h
+new file mode 100644
+index 0000000000000..e6cf38fce4541
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/hid.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_HID_H_
++#define _IPTS_HID_H_
++
++#include "context.h"
++#include "protocol/data.h"
++
++void ipts_hid_handle_input(struct ipts_context *ipts, struct ipts_data *data);
++int ipts_hid_init(struct ipts_context *ipts);
++void ipts_hid_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_HID_H_ */
+diff --git a/drivers/input/touchscreen/ipts/init.c b/drivers/input/touchscreen/ipts/init.c
+new file mode 100644
+index 0000000000000..fb70d55542af7
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/init.c
+@@ -0,0 +1,93 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++#include <linux/mei_cl_bus.h>
++#include <linux/module.h>
++
++#include "context.h"
++#include "control.h"
++#include "data.h"
++#include "receiver.h"
++
++#define IPTS_MEI_UUID UUID_LE(0x3e8d0870, 0x271a, 0x4208, \
++		0x8e, 0xb5, 0x9a, 0xcb, 0x94, 0x02, 0xae, 0x04)
++
++static int ipts_init_probe(struct mei_cl_device *cldev,
++		const struct mei_cl_device_id *id)
++{
++	int ret;
++	struct ipts_context *ipts = NULL;
++
++	dev_info(&cldev->dev, "Probing IPTS\n");
++
++	// Setup the DMA bit mask
++	if (!dma_coerce_mask_and_coherent(&cldev->dev, DMA_BIT_MASK(64))) {
++		dev_info(&cldev->dev, "IPTS using DMA_BIT_MASK(64)\n");
++	} else if (!dma_coerce_mask_and_coherent(&cldev->dev,
++			DMA_BIT_MASK(32))) {
++		dev_info(&cldev->dev, "IPTS using DMA_BIT_MASK(32)\n");
++	} else {
++		dev_err(&cldev->dev, "No suitable DMA for IPTS available\n");
++		return -EFAULT;
++	}
++
++	ret = mei_cldev_enable(cldev);
++	if (ret) {
++		dev_err(&cldev->dev, "Cannot enable IPTS\n");
++		return ret;
++	}
++
++	ipts = devm_kzalloc(&cldev->dev,
++			sizeof(struct ipts_context), GFP_KERNEL);
++	if (!ipts) {
++		mei_cldev_disable(cldev);
++		return -ENOMEM;
++	}
++
++	ipts->client_dev = cldev;
++	ipts->dev = &cldev->dev;
++
++	mei_cldev_set_drvdata(cldev, ipts);
++
++	ipts->receiver_loop = kthread_run(ipts_receiver_loop, (void *)ipts,
++			"ipts_receiver_loop");
++	ipts->data_loop = kthread_run(ipts_data_loop, (void *)ipts,
++			"ipts_data_loop");
++
++	ipts_control_start(ipts);
++
++	return 0;
++}
++
++static int ipts_init_remove(struct mei_cl_device *cldev)
++{
++	struct ipts_context *ipts = mei_cldev_get_drvdata(cldev);
++
++	dev_info(&cldev->dev, "Removing IPTS\n");
++
++	ipts_control_stop(ipts);
++	mei_cldev_disable(cldev);
++	kthread_stop(ipts->receiver_loop);
++	kthread_stop(ipts->data_loop);
++
++	return 0;
++}
++
++static struct mei_cl_device_id ipts_device_id[] = {
++	{ "", IPTS_MEI_UUID, MEI_CL_VERSION_ANY },
++	{ },
++};
++MODULE_DEVICE_TABLE(mei, ipts_device_id);
++
++static struct mei_cl_driver ipts_driver = {
++	.id_table = ipts_device_id,
++	.name = "ipts",
++	.probe = ipts_init_probe,
++	.remove = ipts_init_remove,
++};
++module_mei_cl_driver(ipts_driver);
++
++MODULE_DESCRIPTION("IPTS touchscreen driver");
++MODULE_AUTHOR("Dorian Stoll ");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/input/touchscreen/ipts/math.c 
b/drivers/input/touchscreen/ipts/math.c +new file mode 100644 +index 0000000000000..df956e5447e03 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/math.c +@@ -0,0 +1,103 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include ++#include ++#include ++#include ++ ++#include "math.h" ++ ++/* ++ * Since we need to work with [-pi, pi] in the atan functions, we are using ++ * 1 << 29 for the fixed point numbers. This allows us to store numbers from ++ * [-4, 4] using the full 32-bit signed integer range. ++ * ++ * Some constants such as PI have been already converted to the fixed-point ++ * format and are defined in math.h. ++ */ ++ ++static inline s32 ipts_math_mul(s32 x, s32 y) ++{ ++ return (x * (s64)y) >> 29; ++} ++ ++static inline s32 ipts_math_div(s32 x, s32 y) ++{ ++ return ((s64)x << 29) / y; ++} ++ ++static s32 ipts_math_atan(s32 x) ++{ ++ s32 tmp = ipts_math_mul( ++ ipts_math_mul(x, (abs(x) - (1 << 29))), ++ CONST_2447 + ipts_math_mul(CONST_0663, abs(x))); ++ ++ return ipts_math_mul(M_PI_4, x) - tmp; ++} ++ ++static s32 ipts_math_atan2(s32 y, s32 x) ++{ ++ s32 z; ++ ++ if (x != 0) { ++ if (abs(x) > abs(y)) { ++ z = ipts_math_div(y, x); ++ if (x > 0) ++ return ipts_math_atan(z); ++ else if (y >= 0) ++ return ipts_math_atan(z) + M_PI; ++ else ++ return ipts_math_atan(z) - M_PI; ++ } else { ++ z = ipts_math_div(x, y); ++ if (y > 0) ++ return -ipts_math_atan(z) + M_PI_2; ++ else ++ return -ipts_math_atan(z) - M_PI_2; ++ } ++ } else { ++ if (y > 0) ++ return M_PI_2; ++ else if (y < 0) ++ return -M_PI_2; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Convert altitude in range [0, 9000] and azimuth in range [0, 36000] ++ * to x-/y-tilt in range [-9000, 9000]. Azimuth is given ++ * counter-clockwise, starting with zero on the right. Altitude is ++ * given as angle between stylus and z-axis. 
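++ *
++ * For example, altitude = 4500 (45 degrees) with azimuth = 0 maps to
++ * tx = 4500 and ty = 0, i.e. a stylus leaning 45 degrees towards the
++ * positive x-axis.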
++ */ ++void ipts_math_altitude_azimuth_to_tilt(s32 alt, s32 azm, s32 *tx, s32 *ty) ++{ ++ s32 sin_alt, cos_alt; ++ s32 sin_azm, cos_azm; ++ ++ s32 x, y, z; ++ s64 atan_x, atan_y; ++ ++ sin_alt = fixp_sin32_rad(alt, 36000) / 4; ++ sin_azm = fixp_sin32_rad(azm, 36000) / 4; ++ ++ cos_alt = fixp_cos32_rad(alt, 36000) / 4; ++ cos_azm = fixp_cos32_rad(azm, 36000) / 4; ++ ++ x = ipts_math_mul(sin_alt, cos_azm); ++ y = ipts_math_mul(sin_alt, sin_azm); ++ z = cos_alt; ++ ++ atan_x = ipts_math_atan2(z, x); ++ atan_y = ipts_math_atan2(z, y); ++ ++ atan_x = atan_x * 4500; ++ atan_y = atan_y * 4500; ++ ++ atan_x = atan_x / M_PI_4; ++ atan_y = atan_y / M_PI_4; ++ ++ *tx = 9000 - atan_x; ++ *ty = atan_y - 9000; ++} +diff --git a/drivers/input/touchscreen/ipts/math.h b/drivers/input/touchscreen/ipts/math.h +new file mode 100644 +index 0000000000000..8e831074ab60b +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/math.h +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_MATH_H_ ++#define _IPTS_MATH_H_ ++ ++#include ++ ++/* (pi / 4) * (1 << 29) */ ++#define M_PI_4 421657428 ++#define M_PI_2 (M_PI_4 * 2) ++#define M_PI (M_PI_2 * 2) ++ ++/* 0.2447 * (1 << 29) */ ++#define CONST_2447 131372312 ++ ++/* 0.0663 * (1 << 29) */ ++#define CONST_0663 35594541 ++ ++void ipts_math_altitude_azimuth_to_tilt(s32 alt, s32 azm, s32 *tx, s32 *ty); ++ ++#endif /* _IPTS_MATH_H_ */ +diff --git a/drivers/input/touchscreen/ipts/params.c b/drivers/input/touchscreen/ipts/params.c +new file mode 100644 +index 0000000000000..6aa3f5cf1d762 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/params.c +@@ -0,0 +1,27 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include ++#include ++ ++#include "params.h" ++ ++#define IPTS_PARM(NAME, TYPE, PERM) \ ++ module_param_named(NAME, ipts_params.NAME, TYPE, PERM) ++ ++#define IPTS_DESC(NAME, DESC) \ ++ MODULE_PARM_DESC(NAME, DESC) ++ ++struct ipts_modparams ipts_params = { ++ .debug = false, ++ .singletouch = false, ++}; ++ ++IPTS_PARM(debug, bool, 0400); ++IPTS_DESC(debug, ++ "Enable additional debugging in the IPTS driver (default: false)" ++); ++ ++IPTS_PARM(singletouch, bool, 0400); ++IPTS_DESC(singletouch, ++ "Enables IPTS single touch mode (disables stylus) (default: false)" ++); +diff --git a/drivers/input/touchscreen/ipts/params.h b/drivers/input/touchscreen/ipts/params.h +new file mode 100644 +index 0000000000000..1f992a3bc21b9 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/params.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PARAMS_H_ ++#define _IPTS_PARAMS_H_ ++ ++#include ++ ++struct ipts_modparams { ++ bool debug; ++ bool singletouch; ++}; ++ ++extern struct ipts_modparams ipts_params; ++ ++#endif /* _IPTS_PARAMS_H_ */ +diff --git a/drivers/input/touchscreen/ipts/payload.c b/drivers/input/touchscreen/ipts/payload.c +new file mode 100644 +index 0000000000000..3572ddc0f2fb0 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/payload.c +@@ -0,0 +1,52 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include ++ ++#include "context.h" ++#include "protocol/data.h" ++#include "protocol/payload.h" ++#include "stylus.h" ++ ++void ipts_payload_handle_input(struct ipts_context *ipts, ++ struct ipts_data *data) ++{ ++ u32 i, offset; ++ struct ipts_payload *payload; ++ struct ipts_payload_frame *frame; ++ ++ payload = (struct ipts_payload *)data->data; ++ offset = 0; ++ ++ for (i = 0; i < payload->num_frames; i++) { ++ frame = (struct ipts_payload_frame *)&payload->data[offset]; ++ offset += 
sizeof(struct ipts_payload_frame) + frame->size; ++ ++ switch (frame->type) { ++ case IPTS_PAYLOAD_FRAME_TYPE_STYLUS: ++ ipts_stylus_handle_input(ipts, frame); ++ break; ++ case IPTS_PAYLOAD_FRAME_TYPE_TOUCH: ++ // ignored (for the moment) ++ break; ++ default: ++ // ignored ++ break; ++ } ++ } ++} ++ ++int ipts_payload_init(struct ipts_context *ipts) ++{ ++ int ret; ++ ++ ret = ipts_stylus_init(ipts); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++void ipts_payload_free(struct ipts_context *ipts) ++{ ++ ipts_stylus_free(ipts); ++} +diff --git a/drivers/input/touchscreen/ipts/payload.h b/drivers/input/touchscreen/ipts/payload.h +new file mode 100644 +index 0000000000000..6603714bb6fd0 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/payload.h +@@ -0,0 +1,14 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PAYLOAD_H_ ++#define _IPTS_PAYLOAD_H_ ++ ++#include "context.h" ++#include "protocol/data.h" ++ ++void ipts_payload_handle_input(struct ipts_context *ipts, ++ struct ipts_data *data); ++int ipts_payload_init(struct ipts_context *ipts); ++void ipts_payload_free(struct ipts_context *ipts); ++ ++#endif /* _IPTS_PAYLOAD_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/commands.h b/drivers/input/touchscreen/ipts/protocol/commands.h +new file mode 100644 +index 0000000000000..2533dfb13584a +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/commands.h +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_COMMANDS_H_ ++#define _IPTS_PROTOCOL_COMMANDS_H_ ++ ++#include ++#include ++ ++enum ipts_sensor_mode { ++ IPTS_SENSOR_MODE_SINGLETOUCH = 0, ++ IPTS_SENSOR_MODE_MULTITOUCH, ++ IPTS_SENSOR_MODE_MAX ++}; ++ ++struct ipts_set_mode_cmd { ++ u32 sensor_mode; ++ u8 reserved[12]; ++} __packed; ++ ++struct ipts_set_mem_window_cmd { ++ u32 data_buffer_addr_lower[16]; ++ u32 data_buffer_addr_upper[16]; ++ u32 workqueue_addr_lower; ++ u32 workqueue_addr_upper; ++ u32 doorbell_addr_lower; ++ u32 doorbell_addr_upper; ++ u32 feedback_buffer_addr_lower[16]; ++ u32 feedback_buffer_addr_upper[16]; ++ u32 host2me_addr_lower; ++ u32 host2me_addr_upper; ++ u32 host2me_size; ++ u8 reserved1; ++ u8 workqueue_item_size; ++ u16 workqueue_size; ++ u8 reserved[32]; ++} __packed; ++ ++struct ipts_feedback_cmd { ++ u32 buffer; ++ u32 transaction; ++ u8 reserved[8]; ++} __packed; ++ ++/* ++ * Commands are sent from the host to the ME ++ */ ++struct ipts_command { ++ u32 code; ++ union { ++ struct ipts_set_mode_cmd set_mode; ++ struct ipts_set_mem_window_cmd set_mem_window; ++ struct ipts_feedback_cmd feedback; ++ } data; ++} __packed; ++ ++static_assert(sizeof(struct ipts_set_mode_cmd) == 16); ++static_assert(sizeof(struct ipts_set_mem_window_cmd) == 320); ++static_assert(sizeof(struct ipts_feedback_cmd) == 16); ++static_assert(sizeof(struct ipts_command) == 324); ++ ++#endif /* _IPTS_PROTOCOL_COMMANDS_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/data.h b/drivers/input/touchscreen/ipts/protocol/data.h +new file mode 100644 +index 0000000000000..148e0545b2e4e +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/data.h +@@ -0,0 +1,30 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_DATA_H_ ++#define _IPTS_PROTOCOL_DATA_H_ ++ ++#include ++#include ++ ++enum ipts_data_type { ++ IPTS_DATA_TYPE_PAYLOAD = 0, ++ IPTS_DATA_TYPE_ERROR, ++ IPTS_DATA_TYPE_VENDOR_DATA, ++ IPTS_DATA_TYPE_HID_REPORT, ++ IPTS_DATA_TYPE_GET_FEATURES, ++ IPTS_DATA_TYPE_MAX ++}; ++ ++struct ipts_data { ++ u32 
type; ++ u32 size; ++ u32 buffer; ++ u8 reserved1[20]; ++ u8 transaction; ++ u8 reserved2[31]; ++ u8 data[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_data) == 64); ++ ++#endif /* _IPTS_PROTOCOL_DATA_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/events.h b/drivers/input/touchscreen/ipts/protocol/events.h +new file mode 100644 +index 0000000000000..f8b771f90bd2b +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/events.h +@@ -0,0 +1,29 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_EVENTS_H_ ++#define _IPTS_PROTOCOL_EVENTS_H_ ++ ++/* ++ * Helpers to avoid writing boilerplate code. ++ * The response to a command code is always 0x8000000x, where x ++ * is the command code itself. Instead of writing two definitions, ++ * we use macros to calculate the value on the fly instead. ++ */ ++#define IPTS_CMD(COMMAND) IPTS_EVT_##COMMAND ++#define IPTS_RSP(COMMAND) (IPTS_CMD(COMMAND) + 0x80000000) ++ ++/* ++ * Events that can be sent to / received from the ME ++ */ ++enum ipts_evt_code { ++ IPTS_EVT_GET_DEVICE_INFO = 1, ++ IPTS_EVT_SET_MODE, ++ IPTS_EVT_SET_MEM_WINDOW, ++ IPTS_EVT_QUIESCE_IO, ++ IPTS_EVT_READY_FOR_DATA, ++ IPTS_EVT_FEEDBACK, ++ IPTS_EVT_CLEAR_MEM_WINDOW, ++ IPTS_EVT_NOTIFY_DEV_READY, ++}; ++ ++#endif /* _IPTS_PROTOCOL_EVENTS_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/feedback.h b/drivers/input/touchscreen/ipts/protocol/feedback.h +new file mode 100644 +index 0000000000000..8b3d8b689ee83 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/feedback.h +@@ -0,0 +1,30 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_FEEDBACK_H_ ++#define _IPTS_PROTOCOL_FEEDBACK_H_ ++ ++#include ++#include ++ ++enum ipts_feedback_type { ++ IPTS_FEEDBACK_TYPE_NONE = 0, ++ IPTS_FEEDBACK_TYPE_SOFT_RESET, ++ IPTS_FEEDBACK_TYPE_GOTO_ARMED, ++ IPTS_FEEDBACK_TYPE_GOTO_SENSING, ++ IPTS_FEEDBACK_TYPE_GOTO_SLEEP, ++ IPTS_FEEDBACK_TYPE_GOTO_DOZE, ++ IPTS_FEEDBACK_TYPE_HARD_RESET, ++ IPTS_FEEDBACK_TYPE_MAX ++}; ++ ++struct ipts_feedback { ++ u32 type; ++ u32 size; ++ u32 transaction; ++ u8 reserved[52]; ++ u8 data[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_feedback) == 64); ++ ++#endif /* _IPTS_PROTOCOL_FEEDBACK_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/payload.h b/drivers/input/touchscreen/ipts/protocol/payload.h +new file mode 100644 +index 0000000000000..f46da4ea81f25 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/payload.h +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_PAYLOAD_H_ ++#define _IPTS_PROTOCOL_PAYLOAD_H_ ++ ++#include ++#include ++ ++enum ipts_payload_frame_type { ++ IPTS_PAYLOAD_FRAME_TYPE_STYLUS = 6, ++ IPTS_PAYLOAD_FRAME_TYPE_TOUCH = 8, ++}; ++ ++enum ipts_report_type { ++ IPTS_REPORT_TYPE_TOUCH_HEATMAP_DIM = 0x0403, ++ IPTS_REPORT_TYPE_TOUCH_HEATMAP = 0x0425, ++ IPTS_REPORT_TYPE_STYLUS_NO_TILT = 0x0410, ++ IPTS_REPORT_TYPE_STYLUS_TILT = 0x0461, ++ IPTS_REPORT_TYPE_STYLUS_TILT_SERIAL = 0x0460, ++}; ++ ++struct ipts_payload { ++ u32 counter; ++ u32 num_frames; ++ u8 reserved[4]; ++ u8 data[]; ++} __packed; ++ ++struct ipts_payload_frame { ++ u16 index; ++ u16 type; ++ u32 size; ++ u8 reserved[8]; ++ u8 data[]; ++} __packed; ++ ++struct ipts_report { ++ u16 type; ++ u16 size; ++ u8 data[]; ++} __packed; ++ ++static_assert(sizeof(struct ipts_payload) == 12); ++static_assert(sizeof(struct ipts_payload_frame) == 16); ++static_assert(sizeof(struct ipts_report) == 4); ++ ++#endif /* 
_IPTS_PROTOCOL_PAYLOAD_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/responses.h b/drivers/input/touchscreen/ipts/protocol/responses.h +new file mode 100644 +index 0000000000000..27153d82a5d67 +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/responses.h +@@ -0,0 +1,62 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_RESPONSES_H_ ++#define _IPTS_PROTOCOL_RESPONSES_H_ ++ ++#include ++#include ++ ++enum ipts_me_status { ++ IPTS_ME_STATUS_SUCCESS = 0, ++ IPTS_ME_STATUS_INVALID_PARAMS, ++ IPTS_ME_STATUS_ACCESS_DENIED, ++ IPTS_ME_STATUS_CMD_SIZE_ERROR, ++ IPTS_ME_STATUS_NOT_READY, ++ IPTS_ME_STATUS_REQUEST_OUTSTANDING, ++ IPTS_ME_STATUS_NO_SENSOR_FOUND, ++ IPTS_ME_STATUS_OUT_OF_MEMORY, ++ IPTS_ME_STATUS_INTERNAL_ERROR, ++ IPTS_ME_STATUS_SENSOR_DISABLED, ++ IPTS_ME_STATUS_COMPAT_CHECK_FAIL, ++ IPTS_ME_STATUS_SENSOR_EXPECTED_RESET, ++ IPTS_ME_STATUS_SENSOR_UNEXPECTED_RESET, ++ IPTS_ME_STATUS_RESET_FAILED, ++ IPTS_ME_STATUS_TIMEOUT, ++ IPTS_ME_STATUS_TEST_MODE_FAIL, ++ IPTS_ME_STATUS_SENSOR_FAIL_FATAL, ++ IPTS_ME_STATUS_SENSOR_FAIL_NONFATAL, ++ IPTS_ME_STATUS_INVALID_DEVICE_CAPS, ++ IPTS_ME_STATUS_QUIESCE_IO_IN_PROGRESS, ++ IPTS_ME_STATUS_MAX ++}; ++ ++struct ipts_device_info { ++ u16 vendor_id; ++ u16 device_id; ++ u32 hw_rev; ++ u32 fw_rev; ++ ++ /* Required size of one touch data buffer */ ++ u32 data_size; ++ ++ /* Required size of one feedback buffer */ ++ u32 feedback_size; ++ u8 reserved[24]; ++} __packed; ++ ++/* ++ * Responses are sent from the ME to the host, reacting to a command. ++ */ ++struct ipts_response { ++ u32 code; ++ u32 status; ++ union { ++ struct ipts_device_info device_info; ++ u8 reserved[80]; ++ } data; ++} __packed; ++ ++static_assert(sizeof(struct ipts_device_info) == 44); ++static_assert(sizeof(struct ipts_response) == 88); ++ ++#endif /* _IPTS_PROTOCOL_RESPONSES_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/singletouch.h b/drivers/input/touchscreen/ipts/protocol/singletouch.h +new file mode 100644 +index 0000000000000..bf9912ee2af4c +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/singletouch.h +@@ -0,0 +1,17 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_SINGLETOUCH_H_ ++#define _IPTS_PROTOCOL_SINGLETOUCH_H_ ++ ++#include ++#include ++ ++struct ipts_singletouch_report { ++ u8 touch; ++ u16 x; ++ u16 y; ++} __packed; ++ ++static_assert(sizeof(struct ipts_singletouch_report) == 5); ++ ++#endif /* _IPTS_PROTOCOL_SINGLETOUCH_H_ */ +diff --git a/drivers/input/touchscreen/ipts/protocol/stylus.h b/drivers/input/touchscreen/ipts/protocol/stylus.h +new file mode 100644 +index 0000000000000..950850b365dfb +--- /dev/null ++++ b/drivers/input/touchscreen/ipts/protocol/stylus.h +@@ -0,0 +1,52 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++ ++#ifndef _IPTS_PROTOCOL_STYLUS_H_ ++#define _IPTS_PROTOCOL_STYLUS_H_ ++ ++#include ++#include ++ ++struct ipts_stylus_report { ++ u8 reports; ++ u8 reserved[3]; ++ u8 data[]; ++} __packed; ++ ++struct ipts_stylus_report_serial { ++ u8 reports; ++ u8 reserved[3]; ++ u32 serial; ++ u8 data[]; ++} __packed; ++ ++struct ipts_stylus_report_data { ++ u16 timestamp; ++ u16 mode; ++ u16 x; ++ u16 y; ++ u16 pressure; ++ u16 altitude; ++ u16 azimuth; ++ u16 reserved; ++} __packed; ++ ++struct ipts_stylus_report_data_no_tilt { ++ u8 reserved[4]; ++ u8 mode; ++ u16 x; ++ u16 y; ++ u16 pressure; ++ u8 reserved2; ++} __packed; ++ ++#define IPTS_STYLUS_REPORT_MODE_PROX BIT(0) ++#define IPTS_STYLUS_REPORT_MODE_TOUCH BIT(1) ++#define 
IPTS_STYLUS_REPORT_MODE_BUTTON	BIT(2)
++#define IPTS_STYLUS_REPORT_MODE_ERASER	BIT(3)
++
++static_assert(sizeof(struct ipts_stylus_report) == 4);
++static_assert(sizeof(struct ipts_stylus_report_serial) == 8);
++static_assert(sizeof(struct ipts_stylus_report_data) == 16);
++static_assert(sizeof(struct ipts_stylus_report_data_no_tilt) == 12);
++
++#endif /* _IPTS_PROTOCOL_STYLUS_H_ */
+diff --git a/drivers/input/touchscreen/ipts/receiver.c b/drivers/input/touchscreen/ipts/receiver.c
+new file mode 100644
+index 0000000000000..ab283994c3e5f
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/receiver.c
+@@ -0,0 +1,265 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/kthread.h>
++
++#include "context.h"
++#include "control.h"
++#include "data.h"
++#include "protocol/commands.h"
++#include "protocol/events.h"
++#include "protocol/responses.h"
++#include "resources.h"
++
++static void ipts_receiver_handle_notify_dev_ready(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status)
++{
++	if (msg->status != IPTS_ME_STATUS_SENSOR_FAIL_NONFATAL &&
++			msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	*cmd_status = ipts_control_send(ipts,
++			IPTS_CMD(GET_DEVICE_INFO), NULL, 0);
++}
++
++static void ipts_receiver_handle_get_device_info(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status)
++{
++	if (msg->status != IPTS_ME_STATUS_COMPAT_CHECK_FAIL &&
++			msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	memcpy(&ipts->device_info, &msg->data.device_info,
++			sizeof(struct ipts_device_info));
++
++	dev_info(ipts->dev, "Device %04hX:%04hX found\n",
++			ipts->device_info.vendor_id,
++			ipts->device_info.device_id);
++
++	if (ipts_data_init(ipts))
++		return;
++
++	*cmd_status = ipts_control_send(ipts,
++			IPTS_CMD(CLEAR_MEM_WINDOW), NULL, 0);
++}
++
++static void ipts_receiver_handle_clear_mem_window(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status, int *ret)
++{
++	struct ipts_set_mode_cmd sensor_mode_cmd;
++
++	if (msg->status != IPTS_ME_STATUS_TIMEOUT &&
++			msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	if (ipts->status == IPTS_HOST_STATUS_STOPPING)
++		return;
++
++	if (ipts_resources_init(ipts))
++		return;
++
++	ipts->status = IPTS_HOST_STATUS_RESOURCE_READY;
++
++	memset(&sensor_mode_cmd, 0, sizeof(struct ipts_set_mode_cmd));
++	sensor_mode_cmd.sensor_mode = ipts->mode;
++
++	*cmd_status = ipts_control_send(ipts, IPTS_CMD(SET_MODE),
++			&sensor_mode_cmd, sizeof(struct ipts_set_mode_cmd));
++}
++
++static void ipts_receiver_handle_set_mode(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status)
++{
++	int i;
++	struct ipts_set_mem_window_cmd cmd;
++
++	if (msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	memset(&cmd, 0, sizeof(struct ipts_set_mem_window_cmd));
++
++	for (i = 0; i < 16; i++) {
++		cmd.data_buffer_addr_lower[i] =
++				lower_32_bits(ipts->data[i].dma_address);
++
++		cmd.data_buffer_addr_upper[i] =
++				upper_32_bits(ipts->data[i].dma_address);
++
++		cmd.feedback_buffer_addr_lower[i] =
++				lower_32_bits(ipts->feedback[i].dma_address);
++
++		cmd.feedback_buffer_addr_upper[i] =
++				upper_32_bits(ipts->feedback[i].dma_address);
++	}
++
++	
cmd.workqueue_addr_lower = lower_32_bits(ipts->workqueue.dma_address);
++	cmd.workqueue_addr_upper = upper_32_bits(ipts->workqueue.dma_address);
++
++	cmd.doorbell_addr_lower = lower_32_bits(ipts->doorbell.dma_address);
++	cmd.doorbell_addr_upper = upper_32_bits(ipts->doorbell.dma_address);
++
++	cmd.host2me_addr_lower = lower_32_bits(ipts->host2me.dma_address);
++	cmd.host2me_addr_upper = upper_32_bits(ipts->host2me.dma_address);
++	cmd.host2me_size = ipts->device_info.data_size;
++
++	cmd.workqueue_size = 8192;
++	cmd.workqueue_item_size = 16;
++
++	*cmd_status = ipts_control_send(ipts, IPTS_CMD(SET_MEM_WINDOW),
++			&cmd, sizeof(struct ipts_set_mem_window_cmd));
++}
++
++static void ipts_receiver_handle_set_mem_window(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status)
++{
++	if (msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	*cmd_status = ipts_control_send(ipts,
++			IPTS_CMD(READY_FOR_DATA), NULL, 0);
++	if (*cmd_status)
++		return;
++
++	ipts->status = IPTS_HOST_STATUS_STARTED;
++	dev_info(ipts->dev, "IPTS enabled\n");
++}
++
++static void ipts_receiver_handle_ready_for_data(struct ipts_context *ipts,
++		struct ipts_response *msg)
++{
++	if (msg->status != IPTS_ME_STATUS_SENSOR_DISABLED &&
++			msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	if (ipts->mode != IPTS_SENSOR_MODE_SINGLETOUCH ||
++			ipts->status != IPTS_HOST_STATUS_STARTED)
++		return;
++
++	// Increment the doorbell manually to indicate that a new buffer
++	// filled with touch data is available
++	*((u32 *)ipts->doorbell.address) += 1;
++}
++
++static void ipts_receiver_handle_feedback(struct ipts_context *ipts,
++		struct ipts_response *msg, int *cmd_status)
++{
++	if (msg->status != IPTS_ME_STATUS_COMPAT_CHECK_FAIL &&
++			msg->status != IPTS_ME_STATUS_SUCCESS &&
++			msg->status != IPTS_ME_STATUS_INVALID_PARAMS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	if (ipts->mode != IPTS_SENSOR_MODE_SINGLETOUCH)
++		return;
++
++	*cmd_status = ipts_control_send(ipts,
++			IPTS_CMD(READY_FOR_DATA), NULL, 0);
++}
++
++static void ipts_receiver_handle_quiesce_io(struct ipts_context *ipts,
++		struct ipts_response *msg)
++{
++	if (msg->status != IPTS_ME_STATUS_SUCCESS) {
++		dev_err(ipts->dev, "0x%08x failed - status = %d\n",
++				msg->code, msg->status);
++		return;
++	}
++
++	if (ipts->status == IPTS_HOST_STATUS_RESTARTING)
++		ipts_control_start(ipts);
++}
++
++
++static int ipts_receiver_handle_response(struct ipts_context *ipts,
++		struct ipts_response *msg, u32 msg_len)
++{
++	int cmd_status = 0;
++	int ret = 0;
++
++	switch (msg->code) {
++	case IPTS_RSP(NOTIFY_DEV_READY):
++		ipts_receiver_handle_notify_dev_ready(ipts, msg, &cmd_status);
++		break;
++	case IPTS_RSP(GET_DEVICE_INFO):
++		ipts_receiver_handle_get_device_info(ipts, msg, &cmd_status);
++		break;
++	case IPTS_RSP(CLEAR_MEM_WINDOW):
++		ipts_receiver_handle_clear_mem_window(ipts, msg,
++				&cmd_status, &ret);
++		break;
++	case IPTS_RSP(SET_MODE):
++		ipts_receiver_handle_set_mode(ipts, msg, &cmd_status);
++		break;
++	case IPTS_RSP(SET_MEM_WINDOW):
++		ipts_receiver_handle_set_mem_window(ipts, msg, &cmd_status);
++		break;
++	case IPTS_RSP(READY_FOR_DATA):
++		ipts_receiver_handle_ready_for_data(ipts, msg);
++		break;
++	case IPTS_RSP(FEEDBACK):
++		ipts_receiver_handle_feedback(ipts, msg, &cmd_status);
++		break;
++	case 
IPTS_RSP(QUIESCE_IO):
++		ipts_receiver_handle_quiesce_io(ipts, msg);
++		break;
++	}
++
++	if (ipts->status == IPTS_HOST_STATUS_STOPPING)
++		return 0;
++
++	if (msg->status == IPTS_ME_STATUS_SENSOR_UNEXPECTED_RESET ||
++			msg->status == IPTS_ME_STATUS_SENSOR_EXPECTED_RESET) {
++		dev_info(ipts->dev, "Sensor has been reset: %d\n", msg->status);
++		ipts_control_restart(ipts);
++	}
++
++	if (cmd_status)
++		ipts_control_restart(ipts);
++
++	return ret;
++}
++
++int ipts_receiver_loop(void *data)
++{
++	ssize_t msg_len;
++	struct ipts_context *ipts;
++	struct ipts_response msg;
++
++	ipts = (struct ipts_context *)data;
++	dev_info(ipts->dev, "Starting receive loop\n");
++
++	while (!kthread_should_stop()) {
++		msg_len = mei_cldev_recv(ipts->client_dev,
++				(u8 *)&msg, sizeof(msg));
++
++		if (msg_len <= 0) {
++			dev_err(ipts->dev, "Error in reading ME message\n");
++			continue;
++		}
++
++		if (ipts_receiver_handle_response(ipts, &msg, msg_len))
++			dev_err(ipts->dev, "Error in handling ME message\n");
++	}
++
++	dev_info(ipts->dev, "Stopping receive loop\n");
++	return 0;
++}
+diff --git a/drivers/input/touchscreen/ipts/receiver.h b/drivers/input/touchscreen/ipts/receiver.h
+new file mode 100644
+index 0000000000000..4d413a0abd4c5
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/receiver.h
+@@ -0,0 +1,8 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_RECEIVER_H_
++#define _IPTS_RECEIVER_H_
++
++int ipts_receiver_loop(void *data);
++
++#endif /* _IPTS_RECEIVER_H_ */
+diff --git a/drivers/input/touchscreen/ipts/resources.c b/drivers/input/touchscreen/ipts/resources.c
+new file mode 100644
+index 0000000000000..704db9fdd3fd4
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/resources.c
+@@ -0,0 +1,131 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/dma-mapping.h>
++
++#include "context.h"
++
++void ipts_resources_free(struct ipts_context *ipts)
++{
++	int i;
++	u32 touch_buffer_size;
++	u32 feedback_buffer_size;
++	struct ipts_buffer_info *buffers;
++
++	touch_buffer_size = ipts->device_info.data_size;
++	feedback_buffer_size = ipts->device_info.feedback_size;
++
++	buffers = ipts->data;
++	for (i = 0; i < 16; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dmam_free_coherent(ipts->dev, touch_buffer_size,
++				buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = 0;
++		buffers[i].dma_address = 0;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < 16; i++) {
++		if (!buffers[i].address)
++			continue;
++
++		dmam_free_coherent(ipts->dev, feedback_buffer_size,
++				buffers[i].address, buffers[i].dma_address);
++
++		buffers[i].address = 0;
++		buffers[i].dma_address = 0;
++	}
++
++	if (ipts->doorbell.address) {
++		dmam_free_coherent(ipts->dev, sizeof(u32),
++				ipts->doorbell.address,
++				ipts->doorbell.dma_address);
++
++		ipts->doorbell.address = 0;
++		ipts->doorbell.dma_address = 0;
++	}
++
++	if (ipts->workqueue.address) {
++		dmam_free_coherent(ipts->dev, sizeof(u32),
++				ipts->workqueue.address,
++				ipts->workqueue.dma_address);
++
++		ipts->workqueue.address = 0;
++		ipts->workqueue.dma_address = 0;
++	}
++
++	if (ipts->host2me.address) {
++		dmam_free_coherent(ipts->dev, touch_buffer_size,
++				ipts->host2me.address,
++				ipts->host2me.dma_address);
++
++		ipts->host2me.address = 0;
++		ipts->host2me.dma_address = 0;
++	}
++}
++
++int ipts_resources_init(struct ipts_context *ipts)
++{
++	int i;
++	u32 touch_buffer_size;
++	u32 feedback_buffer_size;
++	struct ipts_buffer_info *buffers;
++
++	touch_buffer_size = ipts->device_info.data_size;
++	feedback_buffer_size = 
++
++	buffers = ipts->data;
++	for (i = 0; i < 16; i++) {
++		buffers[i].address = dmam_alloc_coherent(ipts->dev,
++				touch_buffer_size,
++				&buffers[i].dma_address,
++				GFP_ATOMIC | __GFP_ZERO);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	buffers = ipts->feedback;
++	for (i = 0; i < 16; i++) {
++		buffers[i].address = dmam_alloc_coherent(ipts->dev,
++				feedback_buffer_size,
++				&buffers[i].dma_address,
++				GFP_ATOMIC | __GFP_ZERO);
++
++		if (!buffers[i].address)
++			goto release_resources;
++	}
++
++	ipts->doorbell.address = dmam_alloc_coherent(ipts->dev,
++			sizeof(u32),
++			&ipts->doorbell.dma_address,
++			GFP_ATOMIC | __GFP_ZERO);
++
++	if (!ipts->doorbell.address)
++		goto release_resources;
++
++	ipts->workqueue.address = dmam_alloc_coherent(ipts->dev,
++			sizeof(u32),
++			&ipts->workqueue.dma_address,
++			GFP_ATOMIC | __GFP_ZERO);
++
++	if (!ipts->workqueue.address)
++		goto release_resources;
++
++	ipts->host2me.address = dmam_alloc_coherent(ipts->dev,
++			touch_buffer_size,
++			&ipts->host2me.dma_address,
++			GFP_ATOMIC | __GFP_ZERO);
++
++	if (!ipts->host2me.address)
++		goto release_resources;
++
++	return 0;
++
++release_resources:
++
++	ipts_resources_free(ipts);
++	return -ENOMEM;
++}
+diff --git a/drivers/input/touchscreen/ipts/resources.h b/drivers/input/touchscreen/ipts/resources.h
+new file mode 100644
+index 0000000000000..cf9807b0dbe62
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/resources.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_RESOURCES_H_
++#define _IPTS_RESOURCES_H_
++
++#include "context.h"
++
++int ipts_resources_init(struct ipts_context *ipts);
++void ipts_resources_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_RESOURCES_H_ */
+diff --git a/drivers/input/touchscreen/ipts/singletouch.c b/drivers/input/touchscreen/ipts/singletouch.c
+new file mode 100644
+index 0000000000000..ed70444f649c4
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/singletouch.c
+@@ -0,0 +1,64 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/input.h>
++#include <linux/kernel.h>
++
++#include "context.h"
++#include "protocol/data.h"
++#include "protocol/singletouch.h"
++
++void ipts_singletouch_handle_input(struct ipts_context *ipts,
++		struct ipts_data *data)
++{
++	struct ipts_singletouch_report *report =
++		(struct ipts_singletouch_report *)&data->data[1];
++
++	input_report_key(ipts->singletouch, BTN_TOUCH, report->touch);
++	input_report_abs(ipts->singletouch, ABS_X, report->x);
++	input_report_abs(ipts->singletouch, ABS_Y, report->y);
++
++	input_sync(ipts->singletouch);
++}
++
++int ipts_singletouch_init(struct ipts_context *ipts)
++{
++	int ret;
++
++	ipts->singletouch = input_allocate_device();
++	if (!ipts->singletouch)
++		return -ENOMEM;
++
++	__set_bit(INPUT_PROP_DIRECT, ipts->singletouch->propbit);
++
++	input_set_capability(ipts->singletouch, EV_KEY, BTN_TOUCH);
++	input_set_abs_params(ipts->singletouch, ABS_X, 0, 32767, 0, 0);
++	input_abs_set_res(ipts->singletouch, ABS_X, 112);
++	input_set_abs_params(ipts->singletouch, ABS_Y, 0, 32767, 0, 0);
++	input_abs_set_res(ipts->singletouch, ABS_Y, 199);
++
++	ipts->singletouch->id.bustype = BUS_MEI;
++	ipts->singletouch->id.vendor = ipts->device_info.vendor_id;
++	ipts->singletouch->id.product = ipts->device_info.device_id;
++	ipts->singletouch->id.version = ipts->device_info.fw_rev;
++
++	ipts->singletouch->phys = "heci3";
++	ipts->singletouch->name = "IPTS Singletouch";
++
++	ret = input_register_device(ipts->singletouch);
++	if (ret) {
++		dev_err(ipts->dev, "Cannot register input device: %s (%d)\n",
++				ipts->singletouch->name, ret);
++		input_free_device(ipts->singletouch);
++		return ret;
++	}
++
++	return 0;
++}
++
++void ipts_singletouch_free(struct ipts_context *ipts)
++{
++	if (!ipts->singletouch)
++		return;
++
++	input_unregister_device(ipts->singletouch);
++}
+diff --git a/drivers/input/touchscreen/ipts/singletouch.h b/drivers/input/touchscreen/ipts/singletouch.h
+new file mode 100644
+index 0000000000000..53207497a4628
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/singletouch.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_SINGLETOUCH_H_
++#define _IPTS_SINGLETOUCH_H_
++
++#include "context.h"
++#include "protocol/data.h"
++
++void ipts_singletouch_handle_input(struct ipts_context *ipts,
++		struct ipts_data *data);
++int ipts_singletouch_init(struct ipts_context *ipts);
++void ipts_singletouch_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_SINGLETOUCH_H_ */
+diff --git a/drivers/input/touchscreen/ipts/stylus.c b/drivers/input/touchscreen/ipts/stylus.c
+new file mode 100644
+index 0000000000000..987fa756fec33
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/stylus.c
+@@ -0,0 +1,180 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++#include <linux/input.h>
++#include <linux/kernel.h>
++
++#include "context.h"
++#include "math.h"
++#include "protocol/payload.h"
++#include "protocol/stylus.h"
++
++static void ipts_stylus_handle_stylus_data(struct ipts_context *ipts,
++		struct ipts_stylus_report_data *data)
++{
++	u8 prox = data->mode & IPTS_STYLUS_REPORT_MODE_PROX;
++	u8 touch = data->mode & IPTS_STYLUS_REPORT_MODE_TOUCH;
++	u8 button = data->mode & IPTS_STYLUS_REPORT_MODE_BUTTON;
++	u8 rubber = data->mode & IPTS_STYLUS_REPORT_MODE_ERASER;
++
++	s32 tx = 0;
++	s32 ty = 0;
++
++	// Avoid unnecessary computations: the altitude is zero
++	// while the stylus does not touch the screen.
++	if (data->altitude) {
++		ipts_math_altitude_azimuth_to_tilt(data->altitude,
++				data->azimuth, &tx, &ty);
++	}
++
++	input_report_key(ipts->stylus, BTN_TOUCH, touch);
++	input_report_key(ipts->stylus, BTN_TOOL_PEN, prox && !rubber);
++	input_report_key(ipts->stylus, BTN_TOOL_RUBBER, prox && rubber);
++	input_report_key(ipts->stylus, BTN_STYLUS, button);
++
++	input_report_abs(ipts->stylus, ABS_X, data->x);
++	input_report_abs(ipts->stylus, ABS_Y, data->y);
++	input_report_abs(ipts->stylus, ABS_PRESSURE, data->pressure);
++	input_report_abs(ipts->stylus, ABS_MISC, data->timestamp);
++
++	input_report_abs(ipts->stylus, ABS_TILT_X, tx);
++	input_report_abs(ipts->stylus, ABS_TILT_Y, ty);
++
++	input_sync(ipts->stylus);
++}
++
++static void ipts_stylus_handle_report_tilt_serial(struct ipts_context *ipts,
++		struct ipts_report *report)
++{
++	int i;
++	struct ipts_stylus_report_serial *stylus_report;
++	struct ipts_stylus_report_data *data;
++
++	stylus_report = (struct ipts_stylus_report_serial *)report->data;
++	data = (struct ipts_stylus_report_data *)stylus_report->data;
++
++	// TODO: Track serial number and support multiple styli
++
++	for (i = 0; i < stylus_report->reports; i++)
++		ipts_stylus_handle_stylus_data(ipts, &data[i]);
++}
++
++static void ipts_stylus_handle_report_tilt(struct ipts_context *ipts,
++		struct ipts_report *report)
++{
++	int i;
++	struct ipts_stylus_report *stylus_report;
++	struct ipts_stylus_report_data *data;
++
++	stylus_report = (struct ipts_stylus_report *)report->data;
++	data = (struct ipts_stylus_report_data *)stylus_report->data;
++
++	for (i = 0; i < stylus_report->reports; i++)
++		ipts_stylus_handle_stylus_data(ipts, &data[i]);
++}
++
++static void ipts_stylus_handle_report_no_tilt(struct ipts_context *ipts,
++		struct ipts_report *report)
++{
++	int i;
++	struct ipts_stylus_report_serial *stylus_report;
++	struct ipts_stylus_report_data_no_tilt *data;
++	struct ipts_stylus_report_data new_data;
++
++	stylus_report = (struct ipts_stylus_report_serial *)report->data;
++	data = (struct ipts_stylus_report_data_no_tilt *)stylus_report->data;
++
++	for (i = 0; i < stylus_report->reports; i++) {
++		new_data.mode = data[i].mode;
++		new_data.x = data[i].x;
++		new_data.y = data[i].y;
++		// Scale the pressure up to the 0-4096 range used by tilt reports.
++		new_data.pressure = data[i].pressure * 4;
++		new_data.altitude = 0;
++		new_data.azimuth = 0;
++		new_data.timestamp = 0;
++
++		ipts_stylus_handle_stylus_data(ipts, &new_data);
++	}
++}
++
++void ipts_stylus_handle_input(struct ipts_context *ipts,
++		struct ipts_payload_frame *frame)
++{
++	int size;
++	struct ipts_report *report;
++
++	size = 0;
++
++	while (size < frame->size) {
++		report = (struct ipts_report *)&frame->data[size];
++		size += sizeof(struct ipts_report) + report->size;
++
++		switch (report->type) {
++		case IPTS_REPORT_TYPE_STYLUS_NO_TILT:
++			ipts_stylus_handle_report_no_tilt(ipts, report);
++			break;
++		case IPTS_REPORT_TYPE_STYLUS_TILT:
++			ipts_stylus_handle_report_tilt(ipts, report);
++			break;
++		case IPTS_REPORT_TYPE_STYLUS_TILT_SERIAL:
++			ipts_stylus_handle_report_tilt_serial(ipts, report);
++			break;
++		default:
++			// ignored
++			break;
++		}
++	}
++}
++
++int ipts_stylus_init(struct ipts_context *ipts)
++{
++	int ret;
++
++	ipts->stylus = input_allocate_device();
++	if (!ipts->stylus)
++		return -ENOMEM;
++
++	__set_bit(INPUT_PROP_DIRECT, ipts->stylus->propbit);
++	__set_bit(INPUT_PROP_POINTER, ipts->stylus->propbit);
++
++	input_set_abs_params(ipts->stylus, ABS_X, 0, 9600, 0, 0);
++	input_abs_set_res(ipts->stylus, ABS_X, 34);
++	input_set_abs_params(ipts->stylus, ABS_Y, 0, 7200, 0, 0);
++	input_abs_set_res(ipts->stylus, ABS_Y, 38);
++	input_set_abs_params(ipts->stylus, ABS_PRESSURE, 0, 4096, 0, 0);
++	input_set_abs_params(ipts->stylus, ABS_TILT_X, -9000, 9000, 0, 0);
++	input_abs_set_res(ipts->stylus, ABS_TILT_X, 5730);
++	input_set_abs_params(ipts->stylus, ABS_TILT_Y, -9000, 9000, 0, 0);
++	input_abs_set_res(ipts->stylus, ABS_TILT_Y, 5730);
++	input_set_abs_params(ipts->stylus, ABS_MISC, 0, 65535, 0, 0);
++	input_set_capability(ipts->stylus, EV_KEY, BTN_TOUCH);
++	input_set_capability(ipts->stylus, EV_KEY, BTN_STYLUS);
++	input_set_capability(ipts->stylus, EV_KEY, BTN_TOOL_PEN);
++	input_set_capability(ipts->stylus, EV_KEY, BTN_TOOL_RUBBER);
++
++	ipts->stylus->id.bustype = BUS_MEI;
++	ipts->stylus->id.vendor = ipts->device_info.vendor_id;
++	ipts->stylus->id.product = ipts->device_info.device_id;
++	ipts->stylus->id.version = ipts->device_info.fw_rev;
++
++	ipts->stylus->phys = "heci3";
++	ipts->stylus->name = "IPTS Stylus";
++
++	ret = input_register_device(ipts->stylus);
++	if (ret) {
++		dev_err(ipts->dev, "Cannot register input device: %s (%d)\n",
++				ipts->stylus->name, ret);
++		input_free_device(ipts->stylus);
++		return ret;
++	}
++
++	return 0;
++}
++
++void ipts_stylus_free(struct ipts_context *ipts)
++{
++	if (!ipts->stylus)
++		return;
++
++	input_unregister_device(ipts->stylus);
++}
+diff --git a/drivers/input/touchscreen/ipts/stylus.h b/drivers/input/touchscreen/ipts/stylus.h
+new file mode 100644
+index 0000000000000..5b93add1eac2d
+--- /dev/null
++++ b/drivers/input/touchscreen/ipts/stylus.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++
++#ifndef _IPTS_STYLUS_H_
++#define _IPTS_STYLUS_H_
++
++#include "context.h"
++#include "protocol/payload.h"
++
++void ipts_stylus_handle_input(struct ipts_context *ipts,
++		struct ipts_payload_frame *frame);
++int ipts_stylus_init(struct ipts_context *ipts);
++void ipts_stylus_free(struct ipts_context *ipts);
++
++#endif /* _IPTS_STYLUS_H_ */
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 7becfc768bbcc..3d0f9bfb7c494 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -59,6 +59,7 @@
+ 
+ #define MEI_DEV_ID_SPT		0x9D3A  /* Sunrise Point */
+ #define MEI_DEV_ID_SPT_2	0x9D3B  /* Sunrise Point 2 */
++#define MEI_DEV_ID_SPT_4	0x9D3E  /* Sunrise Point 4 (iTouch) */
+ #define MEI_DEV_ID_SPT_H	0xA13A  /* Sunrise Point H */
+ #define MEI_DEV_ID_SPT_H_2	0xA13B  /* Sunrise Point H 2 */
+ 
+@@ -90,6 +91,7 @@
+ #define MEI_DEV_ID_CDF		0x18D3  /* Cedar Fork */
+ 
+ #define MEI_DEV_ID_ICP_LP	0x34E0  /* Ice Lake Point LP */
++#define MEI_DEV_ID_ICP_LP_4	0x34E4  /* Ice Lake Point LP 4 (iTouch) */
+ 
+ #define MEI_DEV_ID_JSP_N	0x4DE0  /* Jasper Lake Point N */
+ 
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 2a3f2fd5df507..78bc5dd5b4aba 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -68,6 +68,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_4, MEI_ME_PCH8_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
+@@ -94,6 +95,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
++	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP_4, MEI_ME_PCH12_CFG)},
+ 
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ 	{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
+diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
+index 9a61c28ed3ae4..47fc20975245d 100644
+--- a/include/uapi/linux/input.h
++++ b/include/uapi/linux/input.h
+@@ -271,6 +271,7 @@ struct input_mask {
+ #define BUS_RMI			0x1D
+ #define BUS_CEC			0x1E
+ #define BUS_INTEL_ISHTP		0x1F
++#define BUS_MEI			0x44
+ 
+ /*
+  * MT_TOOL types
+--
+2.28.0
+