linux-surface/patches/5.7/0004-surface-sam.patch
Maximilian Luz 441a362da9
Update v5.7 patches
Changes:
- SAM
  - Properly re-enable events after hibernation.
  - Other bugfixes.
  - Continued work on internal documentation.

- mwifiex
  - Fix bug causing network connection failure on certain networks.

Links:
- SAM: 735a01f74b
- mwifiex: https://lkml.org/lkml/2020/8/27/642
- kernel: d9aab7bed9
2020-08-28 17:12:20 +02:00

15674 lines
443 KiB
Diff

From a5d350e1206e570a6483ae77f4b7cd2d0af62225 Mon Sep 17 00:00:00 2001
From: Maximilian Luz <luzmaximilian@gmail.com>
Date: Mon, 17 Aug 2020 01:23:20 +0200
Subject: [PATCH 4/6] surface-sam
---
Documentation/driver-api/index.rst | 1 +
Documentation/driver-api/ssam/index.rst | 69 +
drivers/misc/Kconfig | 1 +
drivers/misc/Makefile | 1 +
drivers/misc/surface_sam/Kconfig | 46 +
drivers/misc/surface_sam/Makefile | 14 +
drivers/misc/surface_sam/bus.c | 394 +++
drivers/misc/surface_sam/bus.h | 14 +
drivers/misc/surface_sam/clients/Kconfig | 121 +
drivers/misc/surface_sam/clients/Makefile | 11 +
.../surface_sam/clients/surface_sam_debugfs.c | 270 ++
.../clients/surface_sam_device_hub.c | 582 ++++
.../surface_sam/clients/surface_sam_dtx.c | 582 ++++
.../surface_sam/clients/surface_sam_hps.c | 1287 +++++++++
.../surface_sam/clients/surface_sam_san.c | 930 +++++++
.../surface_sam/clients/surface_sam_san.h | 30 +
.../clients/surface_sam_sid_perfmode.c | 194 ++
.../clients/surface_sam_sid_power.c | 1112 ++++++++
.../surface_sam/clients/surface_sam_sid_vhf.c | 500 ++++
.../surface_sam/clients/surface_sam_vhf.c | 336 +++
drivers/misc/surface_sam/controller.c | 2384 +++++++++++++++++
drivers/misc/surface_sam/controller.h | 275 ++
drivers/misc/surface_sam/core.c | 764 ++++++
drivers/misc/surface_sam/ssam_trace.h | 619 +++++
drivers/misc/surface_sam/ssh_msgb.h | 196 ++
drivers/misc/surface_sam/ssh_packet_layer.c | 1780 ++++++++++++
drivers/misc/surface_sam/ssh_packet_layer.h | 125 +
drivers/misc/surface_sam/ssh_parser.c | 215 ++
drivers/misc/surface_sam/ssh_parser.h | 151 ++
drivers/misc/surface_sam/ssh_protocol.h | 102 +
drivers/misc/surface_sam/ssh_request_layer.c | 1100 ++++++++
drivers/misc/surface_sam/ssh_request_layer.h | 93 +
include/linux/mod_devicetable.h | 17 +
include/linux/surface_aggregator_module.h | 1006 +++++++
scripts/mod/devicetable-offsets.c | 7 +
scripts/mod/file2alias.c | 22 +
36 files changed, 15351 insertions(+)
create mode 100644 Documentation/driver-api/ssam/index.rst
create mode 100644 drivers/misc/surface_sam/Kconfig
create mode 100644 drivers/misc/surface_sam/Makefile
create mode 100644 drivers/misc/surface_sam/bus.c
create mode 100644 drivers/misc/surface_sam/bus.h
create mode 100644 drivers/misc/surface_sam/clients/Kconfig
create mode 100644 drivers/misc/surface_sam/clients/Makefile
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_debugfs.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_device_hub.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_dtx.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_hps.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_san.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_san.h
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_sid_perfmode.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_sid_power.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_sid_vhf.c
create mode 100644 drivers/misc/surface_sam/clients/surface_sam_vhf.c
create mode 100644 drivers/misc/surface_sam/controller.c
create mode 100644 drivers/misc/surface_sam/controller.h
create mode 100644 drivers/misc/surface_sam/core.c
create mode 100644 drivers/misc/surface_sam/ssam_trace.h
create mode 100644 drivers/misc/surface_sam/ssh_msgb.h
create mode 100644 drivers/misc/surface_sam/ssh_packet_layer.c
create mode 100644 drivers/misc/surface_sam/ssh_packet_layer.h
create mode 100644 drivers/misc/surface_sam/ssh_parser.c
create mode 100644 drivers/misc/surface_sam/ssh_parser.h
create mode 100644 drivers/misc/surface_sam/ssh_protocol.h
create mode 100644 drivers/misc/surface_sam/ssh_request_layer.c
create mode 100644 drivers/misc/surface_sam/ssh_request_layer.h
create mode 100644 include/linux/surface_aggregator_module.h
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index d4e78cb3ef4d5..5728d81c06626 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -97,6 +97,7 @@ available subsections can be seen below.
serial/index
sm501
smsc_ece1099
+ ssam/index
switchtec
sync_file
vfio-mediated-device
diff --git a/Documentation/driver-api/ssam/index.rst b/Documentation/driver-api/ssam/index.rst
new file mode 100644
index 0000000000000..582ddaa91f2a6
--- /dev/null
+++ b/Documentation/driver-api/ssam/index.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Surface System Aggregator Module (SSAM)
+=======================================
+
+The Surface System Aggregator Module ...
+
+API
+---
+
+.. kernel-doc:: drivers/misc/surface_sam/bus.c
+ :export:
+
+.. kernel-doc:: drivers/misc/surface_sam/controller.c
+ :export:
+
+.. kernel-doc:: drivers/misc/surface_sam/core.c
+ :export:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_packet_layer.c
+ :export:
+
+.. kernel-doc:: include/linux/surface_aggregator_module.h
+
+
+Internal
+--------
+
+.. kernel-doc:: drivers/misc/surface_sam/bus.c
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/controller.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/controller.c
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/core.c
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_msgb.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_packet_layer.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_packet_layer.c
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_parser.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_parser.c
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_protocol.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_request_layer.h
+ :internal:
+
+.. kernel-doc:: drivers/misc/surface_sam/ssh_request_layer.c
+ :internal:
+
+
+Internal trace helpers
+----------------------
+
+.. kernel-doc:: drivers/misc/surface_sam/ssam_trace.h
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f1..34e0536b63aef 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -481,4 +481,5 @@ source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
+source "drivers/misc/surface_sam/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d8315..274dc0e96e6ff 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -58,3 +58,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
+obj-$(CONFIG_SURFACE_SAM) += surface_sam/
diff --git a/drivers/misc/surface_sam/Kconfig b/drivers/misc/surface_sam/Kconfig
new file mode 100644
index 0000000000000..ca560d91e2291
--- /dev/null
+++ b/drivers/misc/surface_sam/Kconfig
@@ -0,0 +1,46 @@
+menuconfig SURFACE_SAM
+ depends on ACPI
+ tristate "Microsoft Surface/System Aggregator Module and Platform Drivers"
+ help
+ Drivers for the Surface/System Aggregator Module (SAM) of Microsoft
+ Surface devices.
+
+ SAM is an embedded controller that provides access to various
+ functionalities on these devices, including battery status, keyboard
+ events (on the Laptops) and many more.
+
+ Say M/Y here if you have a Microsoft Surface device with a SAM device
+ (i.e. 5th generation or later).
+
+config SURFACE_SAM_SSH
+ tristate "Surface Serial Hub Driver"
+ depends on SURFACE_SAM
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ default m
+ help
+ Surface Serial Hub driver for 5th generation (or later) Microsoft
+ Surface devices.
+
+ This is the base driver for the embedded serial controller found on
+ 5th generation (and later) Microsoft Surface devices (e.g. Book 2,
+ Laptop, Laptop 2, Pro 2017, Pro 6, ...). This driver itself only
+ provides access to the embedded controller (SAM) and subsequent
+ drivers are required for the respective functionalities.
+
+ If you have a 5th generation (or later) Microsoft Surface device, say
+ Y or M here.
+
+config SURFACE_SAM_SSH_ERROR_INJECTION
+ bool "Surface Serial Hub Error Injection Capabilities"
+ depends on SURFACE_SAM_SSH
+ depends on FUNCTION_ERROR_INJECTION
+ default n
+ help
+ Enable error injection capabilities for the Surface Serial Hub.
+ This is used to debug the driver, specifically the communication
+ interface. It is not required for normal use.
+
+ If you are not sure, say N here.
+
+source "drivers/misc/surface_sam/clients/Kconfig"
diff --git a/drivers/misc/surface_sam/Makefile b/drivers/misc/surface_sam/Makefile
new file mode 100644
index 0000000000000..0a07dd2297874
--- /dev/null
+++ b/drivers/misc/surface_sam/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+# For include/trace/define_trace.h to include ssam_trace.h
+CFLAGS_core.o = -I$(src)
+
+obj-$(CONFIG_SURFACE_SAM_SSH) += surface_sam_ssh.o
+obj-$(CONFIG_SURFACE_SAM_SSH) += clients/
+
+surface_sam_ssh-objs := core.o
+surface_sam_ssh-objs += ssh_parser.o
+surface_sam_ssh-objs += ssh_packet_layer.o
+surface_sam_ssh-objs += ssh_request_layer.o
+surface_sam_ssh-objs += controller.o
+surface_sam_ssh-objs += bus.o
diff --git a/drivers/misc/surface_sam/bus.c b/drivers/misc/surface_sam/bus.c
new file mode 100644
index 0000000000000..e2b1dbad3f190
--- /dev/null
+++ b/drivers/misc/surface_sam/bus.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+#include "bus.h"
+#include "controller.h"
+
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "ssam:c%02Xt%02Xi%02xf%02X\n",
+ sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ssam_device_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ssam_device);
+
+static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=ssam:c%02Xt%02Xi%02xf%02X",
+ sdev->uid.category, sdev->uid.target,
+ sdev->uid.instance, sdev->uid.function);
+}
+
+static void ssam_device_release(struct device *dev)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ ssam_controller_put(sdev->ctrl);
+ kfree(sdev);
+}
+
+const struct device_type ssam_device_type = {
+ .name = "ssam_client",
+ .groups = ssam_device_groups,
+ .uevent = ssam_device_uevent,
+ .release = ssam_device_release,
+};
+EXPORT_SYMBOL_GPL(ssam_device_type);
+
+
+/**
+ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
+ * @ctrl: The controller under which the device should be added.
+ * @uid: The UID of the device to be added.
+ *
+ * This function only creates a new client device. It still has to be added
+ * via ssam_device_add(). Refer to that function for more details.
+ */
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid)
+{
+ struct ssam_device *sdev;
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return NULL;
+
+ device_initialize(&sdev->dev);
+ sdev->dev.bus = &ssam_bus_type;
+ sdev->dev.type = &ssam_device_type;
+ sdev->dev.parent = ssam_controller_device(ctrl);
+ sdev->ctrl = ssam_controller_get(ctrl);
+ sdev->uid = uid;
+
+ dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x",
+ sdev->uid.category, sdev->uid.target, sdev->uid.instance,
+ sdev->uid.function);
+
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(ssam_device_alloc);
+
+/**
+ * ssam_device_add() - Add a SSAM client device.
+ * @sdev: The SSAM client device to be added.
+ *
+ * Added client devices must be guaranteed to always have a valid and active
+ * controller. Thus, this function will fail with %-ENXIO if the controller of
+ * the device has not been initialized yet, has been suspended, or has been
+ * shut down.
+ *
+ * The caller of this function should ensure that the corresponding call to
+ * ssam_device_remove is issued before the controller is shut down. If the
+ * added device is a direct child of the controller device (default), it will
+ * be automatically removed when the controller is shut down.
+ *
+ * By default, the controller device will become the parent of the newly
+ * created client device. The parent may be changed before ssam_device_add is
+ * called, but care must be taken that a) the correct suspend/resume ordering
+ * is guaranteed and b) the client device does not outlive the controller,
+ * i.e. that the device is removed before the controller is being shut down.
+ * In case these guarantees have to be manually enforced, please refer to the
+ * ssam_client_link() and ssam_client_bind() functions, which are intended to
+ * set up device-links for this purpose.
+ */
+int ssam_device_add(struct ssam_device *sdev)
+{
+ int status;
+
+ /*
+ * Ensure that we can only add new devices to a controller if it has
+ * been started and is not going away soon. This works in combination
+ * with ssam_controller_remove_clients to ensure driver presence for the
+ * controller device, i.e. it ensures that the controller (sdev->ctrl)
+ * is always valid and can be used for requests as long as the client
+ * device we add here is registered as child under it. This essentially
+ * guarantees that the client driver can always expect the preconditions
+ * for functions like ssam_request_sync (controller has to be started
+ * and is not suspended) to hold and thus does not have to check for
+ * them.
+ *
+ * Note that for this to work, the controller has to be a parent device.
+ * If it is not a direct parent, care has to be taken that the device is
+ * removed via ssam_device_remove, as device_unregister does not remove
+ * child devices recursively.
+ */
+ ssam_controller_statelock(sdev->ctrl);
+
+ if (READ_ONCE(sdev->ctrl->state) != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(sdev->ctrl);
+ return -ENXIO;
+ }
+
+ status = device_add(&sdev->dev);
+
+ ssam_controller_stateunlock(sdev->ctrl);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_device_add);
+
+/**
+ * ssam_device_remove() - Remove a SSAM client device.
+ * @sdev: The device to remove.
+ *
+ * Removes and unregisters the provided SSAM client device.
+ */
+void ssam_device_remove(struct ssam_device *sdev)
+{
+ device_unregister(&sdev->dev);
+}
+EXPORT_SYMBOL_GPL(ssam_device_remove);
+
+
+/**
+ * ssam_device_id_compatible() - Check if a device ID matches a UID.
+ * @id: The device ID as potential match.
+ * @uid: The device UID matching against.
+ *
+ * Check if the given ID is a match for the given UID, i.e. if a device with
+ * the provided UID is compatible to the given ID following the match rules
+ * described in its &ssam_device_id.match_flags member.
+ */
+static inline bool ssam_device_id_compatible(const struct ssam_device_id *id,
+ struct ssam_device_uid uid)
+{
+ if (id->category != uid.category)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
+ return false;
+
+ if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
+ return false;
+
+ return true;
+}
+
+/**
+ * ssam_device_id_is_null() - Check if a device ID is null.
+ * @id: The device ID to check.
+ *
+ * Check if a given device ID is null, i.e. all zeros. Used to check for the
+ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
+ */
+static inline bool ssam_device_id_is_null(const struct ssam_device_id *id)
+{
+ return id->match_flags == 0
+ && id->category == 0
+ && id->target == 0
+ && id->instance == 0
+ && id->function == 0
+ && id->driver_data == 0;
+}
+
+/**
+ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
+ * @table: The table to search in.
+ * @uid: The UID to match against the individual table entries.
+ *
+ * Find the first match for the provided device UID in the provided ID table
+ * and return it. Returns %NULL if no match could be found.
+ */
+const struct ssam_device_id *ssam_device_id_match(
+ const struct ssam_device_id *table,
+ const struct ssam_device_uid uid)
+{
+ const struct ssam_device_id *id;
+
+ for (id = table; !ssam_device_id_is_null(id); ++id)
+ if (ssam_device_id_compatible(id, uid))
+ return id;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ssam_device_id_match);
+
+/**
+ * ssam_device_get_match() - Find and return the ID matching the device in the
+ * ID table of the bound driver.
+ * @dev: The device for which to get the matching ID table entry.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * currently bound driver and return it. Returns %NULL if the device does not
+ * have a driver bound to it, the driver does not have match_table (i.e. it is
+ * %NULL), or there is no match in the driver's match_table.
+ *
+ * This function essentially calls ssam_device_id_match() with the ID table of
+ * the bound device driver and the UID of the device.
+ */
+const struct ssam_device_id *ssam_device_get_match(
+ const struct ssam_device *dev)
+{
+ const struct ssam_device_driver *sdrv;
+
+ sdrv = to_ssam_device_driver(dev->dev.driver);
+ if (!sdrv)
+ return NULL;
+
+ if (!sdrv->match_table)
+ return NULL;
+
+ return ssam_device_id_match(sdrv->match_table, dev->uid);
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match);
+
+/**
+ * ssam_device_get_match_data() - Find the ID matching the device in the
+ * ID table of the bound driver and return its ``driver_data`` member.
+ * @dev: The device for which to get the match data.
+ *
+ * Find the first match for the UID of the device in the ID table of the
+ * corresponding driver and return its driver_data. Returns %NULL if the
+ * device does not have a driver bound to it, the driver does not have
+ * match_table (i.e. it is %NULL), there is no match in the driver's
+ * match_table, or the match does not have any driver_data.
+ *
+ * This function essentially calls ssam_device_get_match() and, if any match
+ * could be found, returns its &ssam_device_id.driver_data member.
+ */
+const void *ssam_device_get_match_data(const struct ssam_device *dev)
+{
+ const struct ssam_device_id *id;
+
+ id = ssam_device_get_match(dev);
+ if (!id)
+ return NULL;
+
+ return (const void *)id->driver_data;
+}
+EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
+
+
+static int ssam_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (!is_ssam_device(dev))
+ return 0;
+
+ return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
+}
+
+static int ssam_bus_probe(struct device *dev)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
+
+ return sdrv->probe(to_ssam_device(dev));
+}
+
+static int ssam_bus_remove(struct device *dev)
+{
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
+
+ if (sdrv->remove)
+ sdrv->remove(to_ssam_device(dev));
+
+ return 0;
+}
+
+struct bus_type ssam_bus_type = {
+ .name = "ssam",
+ .match = ssam_bus_match,
+ .probe = ssam_bus_probe,
+ .remove = ssam_bus_remove,
+};
+EXPORT_SYMBOL_GPL(ssam_bus_type);
+
+
+/**
+ * __ssam_device_driver_register() - Register a SSAM device driver.
+ * @sdrv: The driver to register.
+ * @owner: The module owning the provided driver.
+ *
+ * Please refer to the ssam_device_driver_register() macro for the normal way
+ * to register a driver from inside its owning module.
+ */
+int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
+ struct module *owner)
+{
+ sdrv->driver.owner = owner;
+ sdrv->driver.bus = &ssam_bus_type;
+
+ /* force drivers to async probe so I/O is possible in probe */
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
+
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
+
+/**
+ * ssam_device_driver_unregister() - Unregister a SSAM device driver.
+ * @sdrv: The driver to unregister.
+ */
+void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
+{
+ driver_unregister(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
+
+
+static int ssam_remove_device(struct device *dev, void *_data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+
+ if (is_ssam_device(dev))
+ ssam_device_remove(sdev);
+
+ return 0;
+}
+
+/**
+ * ssam_controller_remove_clients() - Remove SSAM client devices registered as
+ * direct children under the given controller.
+ * @ctrl: The controller to remove all direct clients for.
+ *
+ * Remove all SSAM client devices registered as direct children under the
+ * given controller. Note that this only accounts for direct children of the
+ * controller device. This does not take care of any client devices where the
+ * parent device has been manually set before calling ssam_device_add. Refer
+ * to ssam_device_add()/ssam_device_remove() for more details on those cases.
+ *
+ * To avoid new devices being added in parallel to this call, the main
+ * controller lock (not statelock) must be held during this (and if
+ * necessary, any subsequent de-initialization) call.
+ */
+void ssam_controller_remove_clients(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+
+ device_for_each_child_reverse(dev, NULL, ssam_remove_device);
+}
+
+
+/**
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
+ */
+int ssam_bus_register(void)
+{
+ return bus_register(&ssam_bus_type);
+}
+
+/**
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
+ */
+void ssam_bus_unregister(void)
+{
+ return bus_unregister(&ssam_bus_type);
+}
diff --git a/drivers/misc/surface_sam/bus.h b/drivers/misc/surface_sam/bus.h
new file mode 100644
index 0000000000000..8b3ddf2100870
--- /dev/null
+++ b/drivers/misc/surface_sam/bus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_BUS_H
+#define _SSAM_BUS_H
+
+#include <linux/surface_aggregator_module.h>
+
+
+void ssam_controller_remove_clients(struct ssam_controller *ctrl);
+
+int ssam_bus_register(void);
+void ssam_bus_unregister(void);
+
+#endif /* _SSAM_BUS_H */
diff --git a/drivers/misc/surface_sam/clients/Kconfig b/drivers/misc/surface_sam/clients/Kconfig
new file mode 100644
index 0000000000000..189bf02e280d7
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/Kconfig
@@ -0,0 +1,121 @@
+config SURFACE_SAM_DEBUGFS
+ tristate "Surface Serial Hub Debug Device"
+ depends on SURFACE_SAM_SSH
+ depends on DEBUG_FS
+ default n
+ help
+ Debug device for direct communication with the embedded controller
+ found on 5th generation (and later) Microsoft Surface devices (e.g.
+ Book 2, Laptop, Laptop 2, Pro 2017, Pro 6, ...) via debugfs.
+
+ If you are not sure, say N here.
+
+config SURFACE_SAM_DEVICE_HUB
+ tristate "Surface SAM Device Hub"
+ depends on SURFACE_SAM_SSH
+ default m
+ help
+ This driver acts as a device hub, providing virtual SAM client devices
+ used on the Surface devices to provide interfaces for the performance
+ mode, HID input devices on Surface Laptop 3 and Surface Book 3, and
+ battery and AC devices on 7th generation Surface devices.
+
+ Note that this module only provides the devices and acts as a sort of
+ registry for them. Both the device hub and the respective drivers for
+ the devices attached to the hub are required for full support.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_SAN
+ tristate "Surface ACPI Notify Driver"
+ depends on SURFACE_SAM_SSH
+ default m
+ help
+ Surface ACPI Notify driver for 5th generation (or later) Microsoft
+ Surface devices.
+
+ This driver enables basic ACPI events and requests, such as battery
+ status requests/events, thermal events, lid status, and possibly more,
+ which would otherwise not work on these devices.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_KBD
+ tristate "Surface Virtual HID Framework Driver"
+ depends on SURFACE_SAM_SSH
+ depends on HID
+ default m
+ help
+ Surface Virtual HID Framework driver for 5th generation (or later)
+ Microsoft Surface devices.
+
+ This driver provides support for the Microsoft Virtual HID framework,
+ which is required for keyboard support on the Surface Laptop 1 and 2.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_DTX
+ tristate "Surface Detachment System (DTX) Driver"
+ depends on SURFACE_SAM_SSH
+ depends on INPUT
+ default m
+ help
+ Surface Detachment System (DTX) driver for the Microsoft Surface Book
+ 2. This driver provides support for proper detachment handling in
+ user-space, status-events relating to the base and support for
+ the safe-guard keeping the base attached when the discrete GPU
+ contained in it is running via the special /dev/surface-dtx device.
+
+ Also provides a standard input device to provide SW_TABLET_MODE events
+ upon device mode change.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_HPS
+ tristate "Surface dGPU Hot-Plug System (dGPU-HPS) Driver"
+ depends on SURFACE_SAM_SSH
+ depends on SURFACE_SAM_SAN
+ depends on GPIO_SYSFS
+ default m
+ help
+ Driver to properly handle hot-plugging and explicit power-on/power-off
+ of the discrete GPU (dGPU) on the Surface Book 2 and 3.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_PERFMODE
+ tristate "Surface Performance Mode Driver"
+ depends on SURFACE_SAM_SSH
+ depends on SYSFS
+ default m
+ help
+ This driver provides support for setting performance-modes on Surface
+ devices via the perf_mode sysfs attribute. Currently only supports the
+ Surface Book 2. Performance-modes directly influence the fan-profile
+ of the device, allowing to choose between higher performance or
+ quieter operation.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_HID
+ tristate "Surface SAM HID Driver"
+ depends on SURFACE_SAM_SSH
+ depends on HID
+ default m
+ help
+ This driver provides support for HID devices connected via the Surface
+ SAM embedded controller. It provides support for keyboard and touchpad
+ on the Surface Laptop 3 models.
+
+ If you are not sure, say M here.
+
+config SURFACE_SAM_PSY
+ tristate "Surface SAM Battery/AC Driver"
+ depends on SURFACE_SAM_SSH
+ select POWER_SUPPLY
+ default m
+ help
+ This driver provides support for the battery and AC on 7th generation
+ Surface devices.
+
+ If you are not sure, say M here.
diff --git a/drivers/misc/surface_sam/clients/Makefile b/drivers/misc/surface_sam/clients/Makefile
new file mode 100644
index 0000000000000..1db9db2f86252
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+obj-$(CONFIG_SURFACE_SAM_DEBUGFS) += surface_sam_debugfs.o
+obj-$(CONFIG_SURFACE_SAM_SAN) += surface_sam_san.o
+obj-$(CONFIG_SURFACE_SAM_KBD) += surface_sam_vhf.o
+obj-$(CONFIG_SURFACE_SAM_DTX) += surface_sam_dtx.o
+obj-$(CONFIG_SURFACE_SAM_HPS) += surface_sam_hps.o
+obj-$(CONFIG_SURFACE_SAM_PERFMODE) += surface_sam_sid_perfmode.o
+obj-$(CONFIG_SURFACE_SAM_HID) += surface_sam_sid_vhf.o
+obj-$(CONFIG_SURFACE_SAM_PSY) += surface_sam_sid_power.o
+obj-$(CONFIG_SURFACE_SAM_DEVICE_HUB) += surface_sam_device_hub.o
diff --git a/drivers/misc/surface_sam/clients/surface_sam_debugfs.c b/drivers/misc/surface_sam/clients/surface_sam_debugfs.c
new file mode 100644
index 0000000000000..9b7ffbe610b10
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_debugfs.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#define SSAM_DBGDEV_NAME "surface_sam_dbgdev"
+#define SSAM_DBGDEV_VERS 0x0100
+
+
+struct ssam_dbgdev_request {
+ __u8 target_category;
+ __u8 target_id;
+ __u8 command_id;
+ __u8 instance_id;
+ __u16 flags;
+ __s16 status;
+
+ struct {
+ __u8 __pad[6];
+ __u16 length;
+ const __u8 __user *data;
+ } payload;
+
+ struct {
+ __u8 __pad[6];
+ __u16 length;
+ __u8 __user *data;
+ } response;
+};
+
+#define SSAM_DBGDEV_IOCTL_GETVERSION _IOR(0xA5, 0, __u32)
+#define SSAM_DBGDEV_IOCTL_REQUEST _IOWR(0xA5, 1, struct ssam_dbgdev_request)
+
+
+struct ssam_dbgdev {
+ struct ssam_controller *ctrl;
+ struct dentry *dentry_dir;
+ struct dentry *dentry_dev;
+};
+
+
+static int ssam_dbgdev_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return nonseekable_open(inode, filp);
+}
+
+static long ssam_dbgdev_request(struct file *file, unsigned long arg)
+{
+ struct ssam_dbgdev *ddev = file->private_data;
+ struct ssam_dbgdev_request __user *r;
+ struct ssam_dbgdev_request rqst;
+ struct ssam_request spec;
+ struct ssam_response rsp;
+ u8 *pldbuf = NULL;
+ u8 *rspbuf = NULL;
+ int status = 0, ret = 0, tmp;
+
+ r = (struct ssam_dbgdev_request __user *)arg;
+ ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
+ if (ret)
+ goto out;
+
+ // setup basic request fields
+ spec.target_category = rqst.target_category;
+ spec.target_id = rqst.target_id;
+ spec.command_id = rqst.command_id;
+ spec.instance_id = rqst.instance_id;
+ spec.flags = rqst.flags;
+ spec.length = rqst.payload.length;
+
+ rsp.capacity = rqst.response.length;
+ rsp.length = 0;
+
+ // get request payload from user-space
+ if (spec.length) {
+ if (!rqst.payload.data) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pldbuf = kzalloc(spec.length, GFP_KERNEL);
+ if (!pldbuf) {
+ status = -ENOMEM;
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (copy_from_user(pldbuf, rqst.payload.data, spec.length)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ spec.payload = pldbuf;
+
+ // allocate response buffer
+ if (rsp.capacity) {
+ if (!rqst.response.data) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rspbuf = kzalloc(rsp.capacity, GFP_KERNEL);
+ if (!rspbuf) {
+ status = -ENOMEM;
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ rsp.pointer = rspbuf;
+
+ // perform request
+ status = ssam_request_sync(ddev->ctrl, &spec, &rsp);
+ if (status)
+ goto out;
+
+ // copy response to user-space
+ if (rsp.length) {
+ if (copy_to_user(rqst.response.data, rsp.pointer, rsp.length)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ // always try to set response-length and status
+ tmp = put_user(rsp.length, &r->response.length);
+ if (!ret)
+ ret = tmp;
+
+ tmp = put_user(status, &r->status);
+ if (!ret)
+ ret = tmp;
+
+ // cleanup
+ if (pldbuf)
+ kfree(pldbuf);
+
+ if (rspbuf)
+ kfree(rspbuf);
+
+ return ret;
+}
+
+static long ssam_dbgdev_getversion(struct file *file, unsigned long arg)
+{
+ put_user(SSAM_DBGDEV_VERS, (u32 __user *)arg);
+ return 0;
+}
+
+static long ssam_dbgdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case SSAM_DBGDEV_IOCTL_GETVERSION:
+ return ssam_dbgdev_getversion(file, arg);
+
+ case SSAM_DBGDEV_IOCTL_REQUEST:
+ return ssam_dbgdev_request(file, arg);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+const struct file_operations ssam_dbgdev_fops = {
+ .owner = THIS_MODULE,
+ .open = ssam_dbgdev_open,
+ .unlocked_ioctl = ssam_dbgdev_ioctl,
+ .compat_ioctl = ssam_dbgdev_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int ssam_dbgdev_probe(struct platform_device *pdev)
+{
+ struct ssam_dbgdev *ddev;
+ struct ssam_controller *ctrl;
+ int status;
+
+ status = ssam_client_bind(&pdev->dev, &ctrl);
+ if (status)
+ return status == -ENXIO ? -EPROBE_DEFER : status;
+
+ ddev = devm_kzalloc(&pdev->dev, sizeof(struct ssam_dbgdev), GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->ctrl = ctrl;
+
+ ddev->dentry_dir = debugfs_create_dir("surface_sam", NULL);
+ if (IS_ERR(ddev->dentry_dir))
+ return PTR_ERR(ddev->dentry_dir);
+
+ ddev->dentry_dev = debugfs_create_file("controller", 0600,
+ ddev->dentry_dir, ddev,
+ &ssam_dbgdev_fops);
+ if (IS_ERR(ddev->dentry_dev)) {
+ debugfs_remove(ddev->dentry_dir);
+ return PTR_ERR(ddev->dentry_dev);
+ }
+
+ platform_set_drvdata(pdev, ddev);
+ return 0;
+}
+
+static int ssam_dbgdev_remove(struct platform_device *pdev)
+{
+ struct ssam_dbgdev *ddev = platform_get_drvdata(pdev);
+
+ debugfs_remove(ddev->dentry_dev);
+ debugfs_remove(ddev->dentry_dir);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static void ssam_dbgdev_release(struct device *dev)
+{
+ // nothing to do
+}
+
+
+static struct platform_device ssam_dbgdev_device = {
+ .name = SSAM_DBGDEV_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ .dev.release = ssam_dbgdev_release,
+};
+
+static struct platform_driver ssam_dbgdev_driver = {
+ .probe = ssam_dbgdev_probe,
+ .remove = ssam_dbgdev_remove,
+ .driver = {
+ .name = SSAM_DBGDEV_NAME,
+ },
+};
+
+static int __init surface_sam_debugfs_init(void)
+{
+ int status;
+
+ status = platform_device_register(&ssam_dbgdev_device);
+ if (status)
+ return status;
+
+ status = platform_driver_register(&ssam_dbgdev_driver);
+ if (status)
+ platform_device_unregister(&ssam_dbgdev_device);
+
+ return status;
+}
+
+static void __exit surface_sam_debugfs_exit(void)
+{
+ platform_driver_unregister(&ssam_dbgdev_driver);
+ platform_device_unregister(&ssam_dbgdev_device);
+}
+
+module_init(surface_sam_debugfs_init);
+module_exit(surface_sam_debugfs_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("DebugFS entries for Surface Aggregator Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_device_hub.c b/drivers/misc/surface_sam/clients/surface_sam_device_hub.c
new file mode 100644
index 0000000000000..bd903b86b96f4
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_device_hub.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface Device Registry.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+/* -- Device registry structures. ------------------------------------------- */
+
+/* One child device: its UID plus optional platform data passed to it. */
+struct ssam_hub_cell {
+	struct ssam_device_uid uid;
+	void *data;
+};
+
+/* Fixed set of cells a hub instantiates as its children. */
+struct ssam_hub_desc {
+	const struct ssam_hub_cell *cells;
+	unsigned int num_cells;
+};
+
+
+/* UIDs of the two virtual hub devices (main hub and detachable base hub). */
+#define SSAM_DUID_HUB_MAIN	SSAM_DUID(_HUB, 0x01, 0x00, 0x00)
+#define SSAM_DUID_HUB_BASE	SSAM_DUID(_HUB, 0x02, 0x00, 0x00)
+
+/* Define a hub descriptor named @__name over the cell array @__cells. */
+#define SSAM_DEFINE_HUB_DESC(__name, __cells)				\
+	struct ssam_hub_desc __name = {					\
+		.cells = __cells,					\
+		.num_cells = ARRAY_SIZE(__cells),			\
+	};
+
+/*
+ * Define a platform (meta) hub for models without a detachable base:
+ * it contains only the main hub, which in turn holds the model's devices.
+ */
+#define SSAM_DEFINE_PLATFORM_HUB(__suffix)				\
+	static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix,	\
+					  ssam_devices_##__suffix);	\
+	static const struct ssam_hub_cell ssam_platform_hubs_##__suffix[] = { \
+		{ SSAM_DUID_HUB_MAIN, (void *)&ssam_device_hub_##__suffix }, \
+	};								\
+	static const SSAM_DEFINE_HUB_DESC(ssam_platform_hub_##__suffix,	\
+					  ssam_platform_hubs_##__suffix); \
+
+/*
+ * Like SSAM_DEFINE_PLATFORM_HUB, but additionally instantiates a base hub
+ * for devices located in a detachable base (e.g. Surface Book 3).
+ */
+#define SSAM_DEFINE_PLATFORM_HUB_WITH_BASE(__suffix)			\
+	static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix,	\
+					  ssam_devices_##__suffix);	\
+	static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix##_base, \
+					  ssam_devices_##__suffix##_base); \
+	static const struct ssam_hub_cell ssam_platform_hubs_##__suffix[] = { \
+		{ SSAM_DUID_HUB_MAIN, (void *)&ssam_device_hub_##__suffix }, \
+		{ SSAM_DUID_HUB_BASE, (void *)&ssam_device_hub_##__suffix##_base },\
+	};								\
+	static const SSAM_DEFINE_HUB_DESC(ssam_platform_hub_##__suffix,	\
+					  ssam_platform_hubs_##__suffix); \
+
+
+/* -- Device registry. ------------------------------------------------------ */
+
+/* UIDs of the individual client devices (battery, thermal/perf, HID). */
+#define SSAM_DUID_BAT_AC	SSAM_DUID(BAT, 0x01, 0x01, 0x01)
+#define SSAM_DUID_BAT_MAIN	SSAM_DUID(BAT, 0x01, 0x01, 0x00)
+#define SSAM_DUID_BAT_SB3BASE	SSAM_DUID(BAT, 0x02, 0x01, 0x00)
+
+#define SSAM_DUID_TMP_PERF	SSAM_DUID(TMP, 0x01, 0x00, 0x02)
+
+#define SSAM_DUID_HID_KEYBOARD	SSAM_DUID(HID, 0x02, 0x01, 0x00)
+#define SSAM_DUID_HID_TOUCHPAD	SSAM_DUID(HID, 0x02, 0x03, 0x00)
+#define SSAM_DUID_HID_IID5	SSAM_DUID(HID, 0x02, 0x05, 0x00)
+#define SSAM_DUID_HID_IID6	SSAM_DUID(HID, 0x02, 0x06, 0x00)
+
+
+/* Per-model device tables; one entry per SSAM client device. */
+static const struct ssam_hub_cell ssam_devices_sb2[] = {
+	{ SSAM_DUID_TMP_PERF },
+};
+
+static const struct ssam_hub_cell ssam_devices_sb3[] = {
+	{ SSAM_DUID_TMP_PERF },
+	{ SSAM_DUID_BAT_AC },
+	{ SSAM_DUID_BAT_MAIN },
+};
+
+/* Devices in the detachable base of the Surface Book 3. */
+static const struct ssam_hub_cell ssam_devices_sb3_base[] = {
+	{ SSAM_DUID_BAT_SB3BASE },
+	{ SSAM_DUID_HID_KEYBOARD },
+	{ SSAM_DUID_HID_TOUCHPAD },
+	{ SSAM_DUID_HID_IID5 },
+	{ SSAM_DUID_HID_IID6 },
+};
+
+static const struct ssam_hub_cell ssam_devices_sl1[] = {
+	{ SSAM_DUID_TMP_PERF },
+};
+
+static const struct ssam_hub_cell ssam_devices_sl2[] = {
+	{ SSAM_DUID_TMP_PERF },
+};
+
+static const struct ssam_hub_cell ssam_devices_sl3[] = {
+	{ SSAM_DUID_TMP_PERF },
+	{ SSAM_DUID_BAT_AC },
+	{ SSAM_DUID_BAT_MAIN },
+	{ SSAM_DUID_HID_KEYBOARD },
+	{ SSAM_DUID_HID_TOUCHPAD },
+	{ SSAM_DUID_HID_IID5 },
+};
+
+static const struct ssam_hub_cell ssam_devices_sp5[] = {
+	{ SSAM_DUID_TMP_PERF },
+};
+
+static const struct ssam_hub_cell ssam_devices_sp6[] = {
+	{ SSAM_DUID_TMP_PERF },
+};
+
+static const struct ssam_hub_cell ssam_devices_sp7[] = {
+	{ SSAM_DUID_TMP_PERF },
+	{ SSAM_DUID_BAT_AC },
+	{ SSAM_DUID_BAT_MAIN },
+};
+
+/* Instantiate the hub descriptors for each supported model. */
+SSAM_DEFINE_PLATFORM_HUB(sb2);
+SSAM_DEFINE_PLATFORM_HUB_WITH_BASE(sb3);
+SSAM_DEFINE_PLATFORM_HUB(sl1);
+SSAM_DEFINE_PLATFORM_HUB(sl2);
+SSAM_DEFINE_PLATFORM_HUB(sl3);
+SSAM_DEFINE_PLATFORM_HUB(sp5);
+SSAM_DEFINE_PLATFORM_HUB(sp6);
+SSAM_DEFINE_PLATFORM_HUB(sp7);
+
+
+/* -- Device registry helper functions. ------------------------------------- */
+
+/* Child-iterator callback: remove the child iff it is an SSAM device. */
+static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
+{
+	if (is_ssam_device(dev))
+		ssam_device_remove(to_ssam_device(dev));
+
+	return 0;
+}
+
+/* Remove all SSAM child devices of @parent, in reverse creation order. */
+static void ssam_hub_remove_devices(struct device *parent)
+{
+	device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
+}
+
+/*
+ * Allocate and register a single SSAM device described by @cell as a child
+ * of @parent. Returns 0 on success or a negative error code; on failure the
+ * allocated device is released via its reference count.
+ */
+static int ssam_hub_add_device(struct device *parent,
+			       struct ssam_controller *ctrl,
+			       const struct ssam_hub_cell *cell)
+{
+	struct ssam_device *sdev;
+	int status;
+
+	sdev = ssam_device_alloc(ctrl, cell->uid);
+	if (!sdev)
+		return -ENOMEM;
+
+	sdev->dev.parent = parent;
+	sdev->dev.platform_data = cell->data;
+
+	status = ssam_device_add(sdev);
+	if (status)
+		ssam_device_put(sdev);
+
+	return status;
+}
+
+/*
+ * Instantiate all devices described by @desc as children of @parent. If
+ * any device fails to register, all previously added children are removed
+ * again and the error is returned.
+ */
+static int ssam_hub_add_devices(struct device *parent,
+				struct ssam_controller *ctrl,
+				const struct ssam_hub_desc *desc)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < desc->num_cells; i++) {
+		ret = ssam_hub_add_device(parent, ctrl, &desc->cells[i]);
+		if (ret) {
+			ssam_hub_remove_devices(parent);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+
+/* -- SSAM main-hub driver. ------------------------------------------------- */
+
+/* Main-hub probe: instantiate the devices listed in our platform data. */
+static int ssam_hub_probe(struct ssam_device *sdev)
+{
+	const struct ssam_hub_desc *desc = dev_get_platdata(&sdev->dev);
+
+	if (!desc)
+		return -ENODEV;
+
+	return ssam_hub_add_devices(&sdev->dev, sdev->ctrl, desc);
+}
+
+/* Main-hub remove: tear down all child devices added in probe. */
+static void ssam_hub_remove(struct ssam_device *sdev)
+{
+	ssam_hub_remove_devices(&sdev->dev);
+}
+
+/* Matches the main-hub UID (category _HUB, channel 0x01). */
+static const struct ssam_device_id ssam_hub_match[] = {
+	{ SSAM_DEVICE(_HUB, 0x01, 0x00, 0x00) },
+	{ },
+};
+
+static struct ssam_device_driver ssam_hub_driver = {
+	.probe = ssam_hub_probe,
+	.remove = ssam_hub_remove,
+	.match_table = ssam_hub_match,
+	.driver = {
+		.name = "surface_sam_hub",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+
+/* -- SSAM base-hub driver. ------------------------------------------------- */
+
+/* Connection state of the detachable base. */
+enum ssam_base_hub_state {
+	SSAM_BASE_HUB_UNINITIALIZED,
+	SSAM_BASE_HUB_CONNECTED,
+	SSAM_BASE_HUB_DISCONNECTED,
+};
+
+struct ssam_base_hub {
+	struct ssam_device *sdev;
+	const struct ssam_hub_desc *devices;	/* devices to (de)register on (dis)connect */
+
+	struct mutex lock;			/* protects @state and device add/remove */
+	enum ssam_base_hub_state state;
+
+	struct ssam_event_notifier notif;	/* listens for BAS connection events */
+};
+
+
+/* Synchronous request: query the current BAS operation mode (one byte). */
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x0d,
+	.instance_id     = 0x00,
+});
+
+#define SSAM_BAS_OPMODE_TABLET		0x00
+#define SSAM_EVENT_BAS_CID_CONNECTION	0x0c
+
+/*
+ * Query the current base state from the EC. Any operation mode other than
+ * "tablet" implies that the base is attached.
+ */
+static int ssam_base_hub_query_state(struct ssam_device *sdev,
+				     enum ssam_base_hub_state *state)
+{
+	int ret;
+	u8 opmode;
+
+	ret = ssam_bas_query_opmode(sdev->ctrl, &opmode);
+	if (ret < 0) {
+		dev_err(&sdev->dev, "failed to query base state: %d\n", ret);
+		return ret;
+	}
+
+	*state = (opmode == SSAM_BAS_OPMODE_TABLET)
+			? SSAM_BASE_HUB_DISCONNECTED
+			: SSAM_BASE_HUB_CONNECTED;
+
+	return 0;
+}
+
+
+/* Sysfs "state" attribute: reads 1 if the base is connected, 0 otherwise. */
+static ssize_t ssam_base_hub_state_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ssam_device *sdev = to_ssam_device(dev);
+	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+	bool connected;
+
+	/* Snapshot the state under the lock; the value may change right after. */
+	mutex_lock(&hub->lock);
+	connected = hub->state == SSAM_BASE_HUB_CONNECTED;
+	mutex_unlock(&hub->lock);
+
+	return snprintf(buf, PAGE_SIZE - 1, "%d\n", connected);
+}
+
+static struct device_attribute ssam_base_hub_attr_state =
+	__ATTR(state, S_IRUGO, ssam_base_hub_state_show, NULL);
+
+static struct attribute *ssam_base_hub_attrs[] = {
+	&ssam_base_hub_attr_state.attr,
+	NULL,
+};
+
+/*
+ * Sysfs group exposing the base "state" attribute. Only referenced within
+ * this file, so declare it static const (the missing 'static' previously
+ * leaked the symbol into the global namespace).
+ */
+static const struct attribute_group ssam_base_hub_group = {
+	.attrs = ssam_base_hub_attrs,
+};
+
+
+/*
+ * Transition the hub to @new. On connect, the base's child devices are
+ * registered; on disconnect (or if registration fails), they are removed.
+ * A no-op if the state is unchanged. Returns 0 or a negative error code.
+ */
+static int ssam_base_hub_update(struct ssam_device *sdev,
+				enum ssam_base_hub_state new)
+{
+	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+	int status = 0;
+
+	mutex_lock(&hub->lock);
+	if (hub->state == new) {
+		mutex_unlock(&hub->lock);
+		return 0;
+	}
+	hub->state = new;
+
+	if (hub->state == SSAM_BASE_HUB_CONNECTED)
+		status = ssam_hub_add_devices(&sdev->dev, sdev->ctrl, hub->devices);
+
+	/* Also covers partial failure of ssam_hub_add_devices() above. */
+	if (hub->state != SSAM_BASE_HUB_CONNECTED || status)
+		ssam_hub_remove_devices(&sdev->dev);
+
+	mutex_unlock(&hub->lock);
+
+	if (status) {
+		dev_err(&sdev->dev, "failed to update base-hub devices: %d\n",
+			status);
+	}
+
+	return status;
+}
+
+/*
+ * Notifier callback for BAS connection events: update the hub state when
+ * the base is attached or detached. Never consumes the event (see below).
+ */
+static u32 ssam_base_hub_notif(struct ssam_notifier_block *nb,
+			       const struct ssam_event *event)
+{
+	struct ssam_base_hub *hub;
+	struct ssam_device *sdev;
+	enum ssam_base_hub_state new;
+
+	hub = container_of(nb, struct ssam_base_hub, notif.base);
+	sdev = hub->sdev;
+
+	if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
+		return 0;
+
+	/* Connection events carry at least one byte: connected yes/no. */
+	if (event->length < 1) {
+		dev_err(&sdev->dev, "unexpected payload size: %u\n",
+			event->length);
+		return 0;
+	}
+
+	if (event->data[0])
+		new = SSAM_BASE_HUB_CONNECTED;
+	else
+		new = SSAM_BASE_HUB_DISCONNECTED;
+
+	ssam_base_hub_update(sdev, new);
+
+	/*
+	 * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
+	 * consumed by the detachment system driver. We're just a (more or less)
+	 * silent observer.
+	 */
+	return 0;
+}
+
+/*
+ * PM resume: the base may have been (de)attached while suspended, so
+ * re-query the EC and re-synchronize the hub state.
+ */
+static int ssam_base_hub_resume(struct device *dev)
+{
+	struct ssam_device *sdev = to_ssam_device(dev);
+	enum ssam_base_hub_state state;
+	int status;
+
+	status = ssam_base_hub_query_state(sdev, &state);
+	if (status)
+		return status;
+
+	return ssam_base_hub_update(sdev, state);
+}
+static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
+
+/*
+ * Base-hub probe: set up state tracking, subscribe to BAS connection
+ * events, synchronize with the current base state, and expose the sysfs
+ * "state" attribute.
+ *
+ * Fixes over the previous version: hub->lock is now initialized (it was
+ * used without mutex_init()), the accidental comma operators in the event
+ * id assignments are proper statements, drvdata is set before the notifier
+ * is registered (the notifier path dereferences it), and the error paths
+ * are unified via goto cleanup.
+ */
+static int ssam_base_hub_probe(struct ssam_device *sdev)
+{
+	const struct ssam_hub_desc *desc = dev_get_platdata(&sdev->dev);
+	enum ssam_base_hub_state state;
+	struct ssam_base_hub *hub;
+	int status;
+
+	if (!desc)
+		return -ENODEV;
+
+	if (!ssam_device_get_match(sdev))
+		return -ENODEV;
+
+	hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
+	if (!hub)
+		return -ENOMEM;
+
+	mutex_init(&hub->lock);
+	hub->sdev = sdev;
+	hub->devices = desc;
+	hub->state = SSAM_BASE_HUB_UNINITIALIZED;
+
+	// TODO: still need to verify registry
+	hub->notif.base.priority = 1000;	// this notifier should run first
+	hub->notif.base.fn = ssam_base_hub_notif;
+	hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+	hub->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+	hub->notif.event.id.instance = 0;
+	hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+	/* Must be set before events can fire: the notifier reads drvdata. */
+	ssam_device_set_drvdata(sdev, hub);
+
+	status = ssam_notifier_register(sdev->ctrl, &hub->notif);
+	if (status)
+		return status;
+
+	status = ssam_base_hub_query_state(sdev, &state);
+	if (status)
+		goto err_notif;
+
+	status = ssam_base_hub_update(sdev, state);
+	if (status)
+		goto err_notif;
+
+	status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
+	if (status)
+		goto err_devices;
+
+	return 0;
+
+err_devices:
+	ssam_hub_remove_devices(&sdev->dev);
+err_notif:
+	ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+	return status;
+}
+
+/* Base-hub remove: undo everything done in probe. */
+static void ssam_base_hub_remove(struct ssam_device *sdev)
+{
+	struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
+
+	sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
+
+	ssam_notifier_unregister(sdev->ctrl, &hub->notif);
+	ssam_hub_remove_devices(&sdev->dev);
+
+	ssam_device_set_drvdata(sdev, NULL);
+	/*
+	 * Note: @hub was allocated with devm_kzalloc() in probe and is freed
+	 * automatically by devres. The explicit kfree(hub) that used to be
+	 * here caused a double free.
+	 */
+}
+
+/* Matches the base-hub UID (category _HUB, channel 0x02). */
+static const struct ssam_device_id ssam_base_hub_match[] = {
+	{ SSAM_DEVICE(_HUB, 0x02, 0x00, 0x00) },
+	{ },
+};
+
+static struct ssam_device_driver ssam_base_hub_driver = {
+	.probe = ssam_base_hub_probe,
+	.remove = ssam_base_hub_remove,
+	.match_table = ssam_base_hub_match,
+	.driver = {
+		.name = "surface_sam_base_hub",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &ssam_base_hub_pm_ops,
+	},
+};
+
+
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
+
+/* ACPI HID -> platform-hub descriptor mapping, per Surface model. */
+static const struct acpi_device_id ssam_platform_hub_match[] = {
+	/* Surface Pro 4, 5, and 6 */
+	{ "MSHW0081", (unsigned long)&ssam_platform_hub_sp5 },
+
+	/* Surface Pro 6 (OMBR >= 0x10) */
+	{ "MSHW0111", (unsigned long)&ssam_platform_hub_sp6 },
+
+	/* Surface Pro 7 */
+	{ "MSHW0116", (unsigned long)&ssam_platform_hub_sp7 },
+
+	/* Surface Book 2 */
+	{ "MSHW0107", (unsigned long)&ssam_platform_hub_sb2 },
+
+	/* Surface Book 3 */
+	{ "MSHW0117", (unsigned long)&ssam_platform_hub_sb3 },
+
+	/* Surface Laptop 1 */
+	{ "MSHW0086", (unsigned long)&ssam_platform_hub_sl1 },
+
+	/* Surface Laptop 2 */
+	{ "MSHW0112", (unsigned long)&ssam_platform_hub_sl2 },
+
+	/* Surface Laptop 3 (13", Intel) */
+	{ "MSHW0114", (unsigned long)&ssam_platform_hub_sl3 },
+
+	/* Surface Laptop 3 (15", AMD) */
+	{ "MSHW0110", (unsigned long)&ssam_platform_hub_sl3 },
+
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
+
+/*
+ * Platform/meta-hub probe: look up the model's hub descriptor from the
+ * ACPI match data, bind to the SSAM controller, and instantiate the
+ * model's hub devices as children of this platform device.
+ */
+static int ssam_platform_hub_probe(struct platform_device *pdev)
+{
+	const struct ssam_hub_desc *desc;
+	struct ssam_controller *ctrl;
+	int status;
+
+	desc = acpi_device_get_match_data(&pdev->dev);
+	if (!desc)
+		return -ENODEV;
+
+	/*
+	 * As we're adding the SSAM client devices as children under this device
+	 * and not the SSAM controller, we need to add a device link to the
+	 * controller to ensure that we remove all of our devices before the
+	 * controller is removed. This also guarantees proper ordering for
+	 * suspend/resume of the devices on this hub.
+	 */
+	status = ssam_client_bind(&pdev->dev, &ctrl);
+	if (status)
+		return status == -ENXIO ? -EPROBE_DEFER : status;
+
+	return ssam_hub_add_devices(&pdev->dev, ctrl, desc);
+}
+
+/* Platform/meta-hub remove: tear down all child devices added in probe. */
+static int ssam_platform_hub_remove(struct platform_device *pdev)
+{
+	ssam_hub_remove_devices(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver ssam_platform_hub_driver = {
+	.probe = ssam_platform_hub_probe,
+	.remove = ssam_platform_hub_remove,
+	.driver = {
+		.name = "surface_sam_platform_hub",
+		.acpi_match_table = ssam_platform_hub_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+
+/* -- Module initialization. ------------------------------------------------ */
+
+/*
+ * Register the platform-hub, main-hub, and base-hub drivers. On failure,
+ * all previously registered drivers are unregistered again.
+ */
+static int __init ssam_device_hub_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&ssam_platform_hub_driver);
+	if (ret)
+		return ret;
+
+	ret = ssam_device_driver_register(&ssam_hub_driver);
+	if (ret)
+		goto err_hub;
+
+	ret = ssam_device_driver_register(&ssam_base_hub_driver);
+	if (ret)
+		goto err_base_hub;
+
+	return 0;
+
+err_base_hub:
+	ssam_device_driver_unregister(&ssam_hub_driver);
+err_hub:
+	platform_driver_unregister(&ssam_platform_hub_driver);
+	return ret;
+}
+
+/* Unregister all drivers in reverse order of registration. */
+static void __exit ssam_device_hub_exit(void)
+{
+	ssam_device_driver_unregister(&ssam_base_hub_driver);
+	ssam_device_driver_unregister(&ssam_hub_driver);
+	platform_driver_unregister(&ssam_platform_hub_driver);
+}
+
+module_init(ssam_device_hub_init);
+module_exit(ssam_device_hub_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface SAM Device Hub Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_dtx.c b/drivers/misc/surface_sam/clients/surface_sam_dtx.c
new file mode 100644
index 0000000000000..106543112b206
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_dtx.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Detachment system (DTX) driver for Microsoft Surface Book 2.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+#define USB_VENDOR_ID_MICROSOFT 0x045e
+#define USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION 0x0922
+
+// name copied from MS device manager
+#define DTX_INPUT_NAME "Microsoft Surface Base 2 Integration Device"
+
+
+#define DTX_CMD_LATCH_LOCK _IO(0x11, 0x01)
+#define DTX_CMD_LATCH_UNLOCK _IO(0x11, 0x02)
+#define DTX_CMD_LATCH_REQUEST _IO(0x11, 0x03)
+#define DTX_CMD_LATCH_OPEN _IO(0x11, 0x04)
+#define DTX_CMD_GET_OPMODE _IOR(0x11, 0x05, int)
+
+#define SAM_EVENT_DTX_CID_CONNECTION 0x0c
+#define SAM_EVENT_DTX_CID_BUTTON 0x0e
+#define SAM_EVENT_DTX_CID_ERROR 0x0f
+#define SAM_EVENT_DTX_CID_LATCH_STATUS 0x11
+
+#define DTX_OPMODE_TABLET 0x00
+#define DTX_OPMODE_LAPTOP 0x01
+#define DTX_OPMODE_STUDIO 0x02
+
+#define DTX_LATCH_CLOSED 0x00
+#define DTX_LATCH_OPENED 0x01
+
+
+// Warning: This must always be a power of 2!
+#define DTX_CLIENT_BUF_SIZE 16
+
+#define DTX_CONNECT_OPMODE_DELAY 1000
+
+#define DTX_ERR KERN_ERR "surface_sam_dtx: "
+#define DTX_WARN KERN_WARNING "surface_sam_dtx: "
+
+
+/* Wire format of one DTX event as delivered to user space via read(). */
+struct surface_dtx_event {
+	u8 type;
+	u8 code;
+	u8 arg0;
+	u8 arg1;
+} __packed;
+
+/* Singleton device state (one DTX instance per system). */
+struct surface_dtx_dev {
+	struct ssam_controller *ctrl;
+
+	struct ssam_event_notifier notif;
+	struct delayed_work opmode_work;	/* delayed opmode re-query after attach */
+	wait_queue_head_t waitq;		/* readers wait here for events */
+	struct miscdevice mdev;
+	spinlock_t client_lock;			/* protects client_list mutation */
+	struct list_head client_list;		/* RCU-traversed list of open clients */
+	struct mutex mutex;			/* serializes ioctls and (de)activation */
+	bool active;				/* false once the device is removed */
+	spinlock_t input_lock;			/* serializes input_dev reporting */
+	struct input_dev *input_dev;
+};
+
+/* Per-open-file state: a ring buffer of pending events. */
+struct surface_dtx_client {
+	struct list_head node;
+	struct surface_dtx_dev *ddev;
+	struct fasync_struct *fasync;
+	spinlock_t buffer_lock;
+	unsigned int buffer_head;		/* producer index (notifier) */
+	unsigned int buffer_tail;		/* consumer index (reader) */
+	struct surface_dtx_event buffer[DTX_CLIENT_BUF_SIZE];
+};
+
+
+/* Forward declaration; full initializer follows the fops definition below. */
+static struct surface_dtx_dev surface_dtx_dev;
+
+
+/* Synchronous BAS latch-control requests (no payload, no response). */
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x06,
+	.instance_id     = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x07,
+	.instance_id     = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x08,
+	.instance_id     = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_open, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x09,
+	.instance_id     = 0x00,
+});
+
+/* Query the current operation mode (tablet/laptop/studio), one byte. */
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x0d,
+	.instance_id     = 0x00,
+});
+
+
+/*
+ * Query the current operation mode from the EC and copy it to user space.
+ *
+ * Returns 0 on success, a negative SAM error code if the EC request
+ * fails, or -EFAULT if the result cannot be written to @buf.
+ */
+static int dtx_bas_get_opmode(struct ssam_controller *ctrl, int __user *buf)
+{
+	u8 opmode;
+	int status;
+
+	status = ssam_bas_query_opmode(ctrl, &opmode);
+	if (status < 0)
+		return status;
+
+	/* put_user() failure means a bad user pointer: -EFAULT, not -EACCES. */
+	if (put_user(opmode, buf))
+		return -EFAULT;
+
+	return 0;
+}
+
+
+/*
+ * Open handler: allocate per-file client state and attach it to the
+ * device's RCU-protected client list.
+ */
+static int surface_dtx_open(struct inode *inode, struct file *file)
+{
+	struct surface_dtx_dev *ddev;
+	struct surface_dtx_client *client;
+
+	ddev = container_of(file->private_data, struct surface_dtx_dev, mdev);
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->ddev = ddev;
+	client->buffer_head = 0;
+	client->buffer_tail = 0;
+	spin_lock_init(&client->buffer_lock);
+
+	/* Publish the client so the event notifier can see it. */
+	spin_lock(&ddev->client_lock);
+	list_add_tail_rcu(&client->node, &ddev->client_list);
+	spin_unlock(&ddev->client_lock);
+
+	file->private_data = client;
+	nonseekable_open(inode, file);
+
+	return 0;
+}
+
+/*
+ * Release handler: unlink the client and wait for concurrent RCU readers
+ * (the event notifier) to finish before freeing it.
+ */
+static int surface_dtx_release(struct inode *inode, struct file *file)
+{
+	struct surface_dtx_client *client = file->private_data;
+
+	// detach client
+	spin_lock(&client->ddev->client_lock);
+	list_del_rcu(&client->node);
+	spin_unlock(&client->ddev->client_lock);
+	synchronize_rcu();
+
+	kfree(client);
+	file->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * Read handler: copy whole surface_dtx_event records from the client's
+ * ring buffer to user space. Blocks (unless O_NONBLOCK) until at least
+ * one event is available or the device goes away.
+ */
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
+{
+	struct surface_dtx_client *client = file->private_data;
+	struct surface_dtx_dev *ddev = client->ddev;
+	struct surface_dtx_event event;
+	size_t read = 0;
+	int status = 0;
+
+	/* Only whole events are transferred (count == 0 reads zero bytes). */
+	if (count != 0 && count < sizeof(struct surface_dtx_event))
+		return -EINVAL;
+
+	if (!ddev->active)
+		return -ENODEV;
+
+	// check availability
+	if (client->buffer_head == client->buffer_tail) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		status = wait_event_interruptible(ddev->waitq,
+				client->buffer_head != client->buffer_tail ||
+				!ddev->active);
+		if (status)
+			return status;
+
+		if (!ddev->active)
+			return -ENODEV;
+	}
+
+	// copy events one by one
+	while (read + sizeof(struct surface_dtx_event) <= count) {
+		spin_lock_irq(&client->buffer_lock);
+
+		if (client->buffer_head == client->buffer_tail) {
+			spin_unlock_irq(&client->buffer_lock);
+			break;
+		}
+
+		// get one event
+		event = client->buffer[client->buffer_tail];
+		client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
+		spin_unlock_irq(&client->buffer_lock);
+
+		/*
+		 * NOTE(review): if copy_to_user() faults here, the event just
+		 * dequeued is lost and -EFAULT is returned even when earlier
+		 * events were copied successfully — consider returning `read`
+		 * when read > 0. Left as-is; behavior change needs testing.
+		 */
+		// copy to userspace
+		if (copy_to_user(buf, &event, sizeof(struct surface_dtx_event)))
+			return -EFAULT;
+
+		read += sizeof(struct surface_dtx_event);
+	}
+
+	return read;
+}
+
+/*
+ * Poll handler: readable when events are queued; EPOLLHUP/EPOLLERR once
+ * the device has been deactivated.
+ */
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
+{
+	struct surface_dtx_client *client = file->private_data;
+	__poll_t mask;	/* was plain int: EPOLL* constants are __poll_t (sparse) */
+
+	poll_wait(file, &client->ddev->waitq, pt);
+
+	if (client->ddev->active)
+		mask = EPOLLOUT | EPOLLWRNORM;
+	else
+		mask = EPOLLHUP | EPOLLERR;
+
+	if (client->buffer_head != client->buffer_tail)
+		mask |= EPOLLIN | EPOLLRDNORM;
+
+	return mask;
+}
+
+/* fasync handler: manage SIGIO subscription for this client. */
+static int surface_dtx_fasync(int fd, struct file *file, int on)
+{
+	struct surface_dtx_client *client = file->private_data;
+
+	return fasync_helper(fd, file, on, &client->fasync);
+}
+
+/*
+ * ioctl handler: latch control (lock/unlock/request/open) and opmode
+ * query. All commands are serialized via ddev->mutex and rejected with
+ * -ENODEV once the device has been deactivated.
+ */
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct surface_dtx_client *client = file->private_data;
+	struct surface_dtx_dev *ddev = client->ddev;
+	int status;
+
+	status = mutex_lock_interruptible(&ddev->mutex);
+	if (status)
+		return status;
+
+	if (!ddev->active) {
+		mutex_unlock(&ddev->mutex);
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case DTX_CMD_LATCH_LOCK:
+		status = ssam_bas_latch_lock(ddev->ctrl);
+		break;
+
+	case DTX_CMD_LATCH_UNLOCK:
+		status = ssam_bas_latch_unlock(ddev->ctrl);
+		break;
+
+	case DTX_CMD_LATCH_REQUEST:
+		status = ssam_bas_latch_request(ddev->ctrl);
+		break;
+
+	case DTX_CMD_LATCH_OPEN:
+		status = ssam_bas_latch_open(ddev->ctrl);
+		break;
+
+	case DTX_CMD_GET_OPMODE:
+		status = dtx_bas_get_opmode(ddev->ctrl, (int __user *)arg);
+		break;
+
+	default:
+		status = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&ddev->mutex);
+	return status;
+}
+
+/* File operations of the /dev/surface_dtx misc device. */
+static const struct file_operations surface_dtx_fops = {
+	.owner          = THIS_MODULE,
+	.open           = surface_dtx_open,
+	.release        = surface_dtx_release,
+	.read           = surface_dtx_read,
+	.poll           = surface_dtx_poll,
+	.fasync         = surface_dtx_fasync,
+	.unlocked_ioctl = surface_dtx_ioctl,
+	.llseek         = no_llseek,
+};
+
+/* Singleton device instance; remaining fields are set up in probe. */
+static struct surface_dtx_dev surface_dtx_dev = {
+	.mdev = {
+		.minor = MISC_DYNAMIC_MINOR,
+		.name = "surface_dtx",
+		.fops = &surface_dtx_fops,
+	},
+	/*
+	 * __SPIN_LOCK_UNLOCKED() requires the lock name as argument; the
+	 * previous empty invocations broke CONFIG_DEBUG_SPINLOCK/lockdep
+	 * builds, which embed the name in the static lock class.
+	 */
+	.client_lock = __SPIN_LOCK_UNLOCKED(surface_dtx_dev.client_lock),
+	.input_lock = __SPIN_LOCK_UNLOCKED(surface_dtx_dev.input_lock),
+	.mutex  = __MUTEX_INITIALIZER(surface_dtx_dev.mutex),
+	.active = false,
+};
+
+
+/*
+ * Broadcast @event into every open client's ring buffer, then wake up
+ * readers. On overrun the oldest event of that client is dropped. Called
+ * from the notifier path; clients are traversed under RCU.
+ */
+static void surface_dtx_push_event(struct surface_dtx_dev *ddev, struct surface_dtx_event *event)
+{
+	struct surface_dtx_client *client;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(client, &ddev->client_list, node) {
+		spin_lock(&client->buffer_lock);
+
+		client->buffer[client->buffer_head++] = *event;
+		client->buffer_head &= DTX_CLIENT_BUF_SIZE - 1;
+
+		/* Head caught up with tail: overwrite the oldest event. */
+		if (unlikely(client->buffer_head == client->buffer_tail)) {
+			printk(DTX_WARN "event buffer overrun\n");
+			client->buffer_tail = (client->buffer_tail + 1) & (DTX_CLIENT_BUF_SIZE - 1);
+		}
+
+		spin_unlock(&client->buffer_lock);
+
+		kill_fasync(&client->fasync, SIGIO, POLL_IN);
+	}
+	rcu_read_unlock();
+
+	wake_up_interruptible(&ddev->waitq);
+}
+
+
+/*
+ * Re-query the operation mode from the EC and publish it both as a DTX
+ * event (type 0x11 / code 0x0d) and as an SW_TABLET_MODE input switch.
+ *
+ * NOTE(review): "opmpde" in the name is a typo for "opmode"; renaming
+ * requires touching the caller (surface_dtx_opmode_workfn) as well.
+ */
+static void surface_dtx_update_opmpde(struct surface_dtx_dev *ddev)
+{
+	struct surface_dtx_event event;
+	u8 opmode;
+	int status;
+
+	// get operation mode
+	status = ssam_bas_query_opmode(ddev->ctrl, &opmode);
+	if (status < 0) {
+		printk(DTX_ERR "EC request failed with error %d\n", status);
+		return;
+	}
+
+	// send DTX event
+	event.type = 0x11;
+	event.code = 0x0D;
+	event.arg0 = opmode;
+	event.arg1 = 0x00;
+
+	surface_dtx_push_event(ddev, &event);
+
+	// send SW_TABLET_MODE event
+	spin_lock(&ddev->input_lock);
+	input_report_switch(ddev->input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
+	input_sync(ddev->input_dev);
+	spin_unlock(&ddev->input_lock);
+}
+
+/* Delayed-work wrapper: re-query the opmode some time after (re)attach. */
+static void surface_dtx_opmode_workfn(struct work_struct *work)
+{
+	struct surface_dtx_dev *ddev = container_of(work, struct surface_dtx_dev, opmode_work.work);
+
+	surface_dtx_update_opmpde(ddev);
+}
+
+/*
+ * SSAM notifier callback: forward detachment-system events (connection,
+ * button, error, latch status) to user space and, on connection changes,
+ * schedule an opmode re-query (delayed on attach, immediate on detach).
+ */
+static u32 surface_dtx_notification(struct ssam_notifier_block *nb, const struct ssam_event *in_event)
+{
+	struct surface_dtx_dev *ddev = container_of(nb, struct surface_dtx_dev, notif.base);
+	struct surface_dtx_event event;
+	unsigned long delay;
+
+	switch (in_event->command_id) {
+	case SAM_EVENT_DTX_CID_CONNECTION:
+	case SAM_EVENT_DTX_CID_BUTTON:
+	case SAM_EVENT_DTX_CID_ERROR:
+	case SAM_EVENT_DTX_CID_LATCH_STATUS:
+		/* These events carry at most two payload bytes. */
+		if (in_event->length > 2) {
+			printk(DTX_ERR "unexpected payload size (cid: %x, len: %u)\n",
+			       in_event->command_id, in_event->length);
+			return SSAM_NOTIF_HANDLED;
+		}
+
+		event.type = in_event->target_category;
+		event.code = in_event->command_id;
+		event.arg0 = in_event->length >= 1 ? in_event->data[0] : 0x00;
+		event.arg1 = in_event->length >= 2 ? in_event->data[1] : 0x00;
+		surface_dtx_push_event(ddev, &event);
+		break;
+
+	default:
+		return 0;
+	}
+
+	// update device mode
+	if (in_event->command_id == SAM_EVENT_DTX_CID_CONNECTION) {
+		/* Give a freshly attached base time to come up before querying. */
+		delay = event.arg0 ? DTX_CONNECT_OPMODE_DELAY : 0;
+		schedule_delayed_work(&ddev->opmode_work, delay);
+	}
+
+	return SSAM_NOTIF_HANDLED;
+}
+
+
+/*
+ * Allocate, initialize, and register the SW_TABLET_MODE input device,
+ * seeding the switch with the current EC operation mode.
+ *
+ * Returns the registered device or an ERR_PTR. Fix: on registration
+ * failure, the device must be released with input_free_device() — calling
+ * input_unregister_device() on a never-registered device is invalid.
+ */
+static struct input_dev *surface_dtx_register_inputdev(
+		struct platform_device *pdev, struct ssam_controller *ctrl)
+{
+	struct input_dev *input_dev;
+	u8 opmode;
+	int status;
+
+	input_dev = input_allocate_device();
+	if (!input_dev)
+		return ERR_PTR(-ENOMEM);
+
+	input_dev->name = DTX_INPUT_NAME;
+	input_dev->dev.parent = &pdev->dev;
+	input_dev->id.bustype = BUS_VIRTUAL;
+	input_dev->id.vendor  = USB_VENDOR_ID_MICROSOFT;
+	input_dev->id.product = USB_DEVICE_ID_MS_SURFACE_BASE_2_INTEGRATION;
+
+	input_set_capability(input_dev, EV_SW, SW_TABLET_MODE);
+
+	status = ssam_bas_query_opmode(ctrl, &opmode);
+	if (status < 0) {
+		input_free_device(input_dev);
+		return ERR_PTR(status);
+	}
+
+	input_report_switch(input_dev, SW_TABLET_MODE, opmode != DTX_OPMODE_LAPTOP);
+
+	status = input_register_device(input_dev);
+	if (status) {
+		input_free_device(input_dev);
+		return ERR_PTR(status);
+	}
+
+	return input_dev;
+}
+
+
+/*
+ * Probe: bind to the SSAM controller, register the input and misc
+ * devices, and subscribe to BAS events.
+ *
+ * Fixes over the previous version: the error paths no longer unregister
+ * ddev->input_dev (which, in the already-active case, is NULL or belongs
+ * to another instance) but the local input_dev, and ddev->active is reset
+ * on failure so a later probe can succeed.
+ */
+static int surface_sam_dtx_probe(struct platform_device *pdev)
+{
+	struct surface_dtx_dev *ddev = &surface_dtx_dev;
+	struct ssam_controller *ctrl;
+	struct input_dev *input_dev;
+	int status;
+
+	// link to ec
+	status = ssam_client_bind(&pdev->dev, &ctrl);
+	if (status)
+		return status == -ENXIO ? -EPROBE_DEFER : status;
+
+	input_dev = surface_dtx_register_inputdev(pdev, ctrl);
+	if (IS_ERR(input_dev))
+		return PTR_ERR(input_dev);
+
+	// initialize device
+	mutex_lock(&ddev->mutex);
+	if (ddev->active) {
+		mutex_unlock(&ddev->mutex);
+		/* Another instance is active; release only our own device. */
+		input_unregister_device(input_dev);
+		return -ENODEV;
+	}
+
+	ddev->ctrl = ctrl;
+	INIT_DELAYED_WORK(&ddev->opmode_work, surface_dtx_opmode_workfn);
+	INIT_LIST_HEAD(&ddev->client_list);
+	init_waitqueue_head(&ddev->waitq);
+	ddev->active = true;
+	ddev->input_dev = input_dev;
+	mutex_unlock(&ddev->mutex);
+
+	status = misc_register(&ddev->mdev);
+	if (status)
+		goto err_misc;
+
+	// set up events
+	ddev->notif.base.priority = 1;
+	ddev->notif.base.fn = surface_dtx_notification;
+	ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+	ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
+	ddev->notif.event.id.instance = 0;
+	ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+	status = ssam_notifier_register(ctrl, &ddev->notif);
+	if (status)
+		goto err_events_setup;
+
+	return 0;
+
+err_events_setup:
+	misc_deregister(&ddev->mdev);
+err_misc:
+	/* Roll back activation so the device can be probed again. */
+	mutex_lock(&ddev->mutex);
+	ddev->active = false;
+	mutex_unlock(&ddev->mutex);
+	input_unregister_device(input_dev);
+	return status;
+}
+
+/*
+ * Remove: deactivate the device, stop event delivery, notify waiting and
+ * fasync clients, then unregister the user-space interfaces.
+ */
+static int surface_sam_dtx_remove(struct platform_device *pdev)
+{
+	struct surface_dtx_dev *ddev = &surface_dtx_dev;
+	struct surface_dtx_client *client;
+
+	mutex_lock(&ddev->mutex);
+	if (!ddev->active) {
+		mutex_unlock(&ddev->mutex);
+		return 0;
+	}
+
+	// mark as inactive
+	ddev->active = false;
+	mutex_unlock(&ddev->mutex);
+
+	// After this call we're guaranteed that no more input events will arive
+	ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
+
+	// wake up clients
+	spin_lock(&ddev->client_lock);
+	list_for_each_entry(client, &ddev->client_list, node) {
+		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
+	}
+	spin_unlock(&ddev->client_lock);
+
+	/* Blocked readers observe !ddev->active and return -ENODEV. */
+	wake_up_interruptible(&ddev->waitq);
+
+	// unregister user-space devices
+	input_unregister_device(ddev->input_dev);
+	misc_deregister(&ddev->mdev);
+
+	return 0;
+}
+
+
+/* ACPI id of the detachment system (Surface Book 2 and later). */
+static const struct acpi_device_id surface_sam_dtx_match[] = {
+	{ "MSHW0133", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, surface_sam_dtx_match);
+
+static struct platform_driver surface_sam_dtx = {
+	.probe = surface_sam_dtx_probe,
+	.remove = surface_sam_dtx_remove,
+	.driver = {
+		.name = "surface_sam_dtx",
+		.acpi_match_table = surface_sam_dtx_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+module_platform_driver(surface_sam_dtx);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Detachment System (DTX) Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_hps.c b/drivers/misc/surface_sam/clients/surface_sam_hps.c
new file mode 100644
index 0000000000000..a47a5eb7391a1
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_hps.c
@@ -0,0 +1,1287 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface dGPU hot-plug system driver.
+ * Supports explicit setting of the dGPU power-state on the Surface Book 2 and
+ * properly handles hot-plugging by detaching the base.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#include <linux/surface_aggregator_module.h>
+#include "surface_sam_san.h"
+
+
+// TODO: vgaswitcheroo integration
+
+
+// Forward declaration; used by the power-switching helpers below.
+static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix);
+
+
+// _DSM interface for direct dGPU control (revision 1):
+//   function 0x02 returns the PCI addresses of the dGPU devices,
+//   function 0x05 sets the dGPU power state.
+#define SHPS_DSM_REVISION	1
+#define SHPS_DSM_GPU_ADDRS	0x02
+#define SHPS_DSM_GPU_POWER	0x05
+static const guid_t SHPS_DSM_UUID =
+	GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd,
+		  0x32, 0x0e, 0x10, 0x36, 0x0a);
+
+
+#define SAM_DGPU_TC			0x13
+#define SAM_DGPU_CID_POWERON		0x02
+#define ACPI_SGCP_NOTIFY_POWER_ON	0x81
+
+#define SHPS_DSM_GPU_ADDRS_RP		"RP5_PCIE"
+#define SHPS_DSM_GPU_ADDRS_DGPU		"DGPU_PCIE"
+#define SHPS_PCI_GPU_ADDR_RP		"\\_SB.PCI0.RP13._ADR"
+
+// GPIO lines exposed by the SHPS ACPI device, in firmware pin order:
+// interrupt and level lines for base presence, dGPU power, dGPU presence.
+static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false };
+static const struct acpi_gpio_params gpio_base_presence     = { 1, 0, false };
+static const struct acpi_gpio_params gpio_dgpu_power_int    = { 2, 0, false };
+static const struct acpi_gpio_params gpio_dgpu_power        = { 3, 0, false };
+static const struct acpi_gpio_params gpio_dgpu_presence_int = { 4, 0, false };
+static const struct acpi_gpio_params gpio_dgpu_presence     = { 5, 0, false };
+
+static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
+	{ "base_presence-int-gpio", &gpio_base_presence_int, 1 },
+	{ "base_presence-gpio",     &gpio_base_presence,     1 },
+	{ "dgpu_power-int-gpio",    &gpio_dgpu_power_int,    1 },
+	{ "dgpu_power-gpio",        &gpio_dgpu_power,        1 },
+	{ "dgpu_presence-int-gpio", &gpio_dgpu_presence_int, 1 },
+	{ "dgpu_presence-gpio",     &gpio_dgpu_presence,     1 },
+	{ },
+};
+
+
+// Logical dGPU power state as used throughout this driver.
+enum shps_dgpu_power {
+	SHPS_DGPU_POWER_OFF      = 0,
+	SHPS_DGPU_POWER_ON       = 1,
+	SHPS_DGPU_POWER_UNKNOWN  = 2,
+};
+
+// Human-readable name for a dGPU power state (for logging).
+static const char *shps_dgpu_power_str(enum shps_dgpu_power power)
+{
+	switch (power) {
+	case SHPS_DGPU_POWER_OFF:
+		return "off";
+	case SHPS_DGPU_POWER_ON:
+		return "on";
+	case SHPS_DGPU_POWER_UNKNOWN:
+		return "unknown";
+	default:
+		return "<invalid>";
+	}
+}
+
+// How the platform signals dGPU power-on events to this driver:
+// via the SAN RQSG handler (gen 1) or the \_SB.SGPC ACPI notify (gen 2).
+enum shps_notification_method {
+	SHPS_NOTIFICATION_METHOD_SAN = 1,
+	SHPS_NOTIFICATION_METHOD_SGCP = 2
+};
+
+struct shps_hardware_traits {
+	enum shps_notification_method notification_method;
+	const char *dgpu_rp_pci_address;
+};
+
+// Per-device driver state; `lock` guards power-state transitions,
+// `state` holds the SHPS_STATE_BIT_* flags below.
+struct shps_driver_data {
+	struct ssam_controller *ctrl;
+
+	struct mutex lock;
+	struct pci_dev *dgpu_root_port;
+	struct pci_saved_state *dgpu_root_port_state;
+	struct gpio_desc *gpio_dgpu_power;
+	struct gpio_desc *gpio_dgpu_presence;
+	struct gpio_desc *gpio_base_presence;
+	unsigned int irq_dgpu_presence;
+	unsigned int irq_base_presence;
+	unsigned long state;
+	acpi_handle sgpc_handle;
+	struct shps_hardware_traits hardware_traits;
+};
+
+struct shps_hardware_probe {
+	const char *hardware_id;
+	int generation;
+	struct shps_hardware_traits *hardware_traits;
+};
+
+static struct shps_hardware_traits shps_gen1_hwtraits = {
+	.notification_method = SHPS_NOTIFICATION_METHOD_SAN
+};
+
+static struct shps_hardware_traits shps_gen2_hwtraits = {
+	.notification_method = SHPS_NOTIFICATION_METHOD_SGCP,
+	.dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP
+};
+
+// Probed in order; the NULL entry is the fallback and must stay last.
+static const struct shps_hardware_probe shps_hardware_probe_match[] = {
+	/* Surface Book 3 */
+	{ "MSHW0117", 2, &shps_gen2_hwtraits },
+
+	/* Surface Book 2 (default, must be last entry) */
+	{ NULL, 1, &shps_gen1_hwtraits }
+};
+
+#define SHPS_STATE_BIT_PWRTGT		0	/* desired power state: 1 for on, 0 for off */
+#define SHPS_STATE_BIT_RPPWRON_SYNC	1	/* synchronous/requested power-up in progress */
+#define SHPS_STATE_BIT_WAKE_ENABLED	2	/* wakeup via base-presence GPIO enabled */
+
+
+#define SHPS_DGPU_PARAM_PERM	0644
+
+// Module-parameter power values; extends enum shps_dgpu_power with
+// "as-is" (-1). __SHPS_DGPU_MP_POWER_{START,END} bound the valid range.
+enum shps_dgpu_power_mp {
+	SHPS_DGPU_MP_POWER_OFF  = SHPS_DGPU_POWER_OFF,
+	SHPS_DGPU_MP_POWER_ON   = SHPS_DGPU_POWER_ON,
+	SHPS_DGPU_MP_POWER_ASIS = -1,
+
+	__SHPS_DGPU_MP_POWER_START = -1,
+	__SHPS_DGPU_MP_POWER_END   = 1,
+};
+
+// Module-parameter setter: accept an integer in the range
+// [__SHPS_DGPU_MP_POWER_START, __SHPS_DGPU_MP_POWER_END] (-1..1),
+// then delegate the actual store to param_set_int().
+static int param_dgpu_power_set(const char *val, const struct kernel_param *kp)
+{
+	int power = SHPS_DGPU_MP_POWER_OFF;
+	int status;
+
+	status = kstrtoint(val, 0, &power);
+	if (status)
+		return status;
+
+	if (power < __SHPS_DGPU_MP_POWER_START || power > __SHPS_DGPU_MP_POWER_END)
+		return -EINVAL;
+
+	return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops param_dgpu_power_ops = {
+	.set = param_dgpu_power_set,
+	.get = param_get_int,
+};
+
+// Power policy applied on init/exit/suspend; values per enum
+// shps_dgpu_power_mp (0: off, 1: on, -1: as-is).
+static int param_dgpu_power_init = SHPS_DGPU_MP_POWER_OFF;
+static int param_dgpu_power_exit = SHPS_DGPU_MP_POWER_ON;
+static int param_dgpu_power_susp = SHPS_DGPU_MP_POWER_ASIS;
+static bool param_dtx_latch = true;
+
+module_param_cb(dgpu_power_init, &param_dgpu_power_ops, &param_dgpu_power_init, SHPS_DGPU_PARAM_PERM);
+module_param_cb(dgpu_power_exit, &param_dgpu_power_ops, &param_dgpu_power_exit, SHPS_DGPU_PARAM_PERM);
+module_param_cb(dgpu_power_susp, &param_dgpu_power_ops, &param_dgpu_power_susp, SHPS_DGPU_PARAM_PERM);
+module_param_named(dtx_latch, param_dtx_latch, bool, SHPS_DGPU_PARAM_PERM);
+
+MODULE_PARM_DESC(dgpu_power_init, "dGPU power state to be set on init (0: off / 1: on / -1: as-is, default: off)");
+MODULE_PARM_DESC(dgpu_power_exit, "dGPU power state to be set on exit (0: off / 1: on / -1: as-is, default: on)");
+MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on suspend (0: off / 1: on / -1: as-is, default: as-is)");
+MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)");
+
+// SAM requests to lock/unlock the DTX base latch (BAS target, commands
+// 0x06 "latch lock" and 0x07 "latch unlock"); used to keep the base
+// attached while the dGPU behind it is powered.
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x06,
+	.instance_id     = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
+	.target_category = SSAM_SSH_TC_BAS,
+	.target_id       = 0x01,
+	.command_id      = 0x07,
+	.instance_id     = 0x00,
+});
+
+// Resolve the dGPU root port's PCI address by evaluating the ACPI _ADR
+// object named by `entry` (gen-2 hardware path). Returns the encoded
+// (bus << 8 | devfn) address on success, -ENODEV on failure.
+static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry)
+{
+	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+	acpi_status status;
+	u64 device_addr;
+	u8 bus, dev, fun;
+
+	// The method takes no arguments; pass NULL instead of a (formerly
+	// zero-length, thus invalid) acpi_object_list.
+	status = acpi_evaluate_integer(handle, (acpi_string)entry, NULL, &device_addr);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	// _ADR encoding: device in bits 16-23, function in bits 0-7; the
+	// bus number is not part of _ADR and is assumed to be 0 here.
+	bus = 0;
+	dev = (device_addr & 0xFF0000) >> 16;
+	fun = device_addr & 0xFF;
+
+	dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n",
+		 (u32)bus, (u32)dev, (u32)fun);
+
+	return bus << 8 | PCI_DEVFN(dev, fun);
+}
+
+// Resolve a PCI address from the _DSM GPU-address package (gen-1 path).
+// The package is a flat list of (name, address, <integer>) triples; the
+// entry whose name matches `entry` provides the address. Returns the
+// encoded (bus << 8 | devfn) address, -ENODEV if no entry matched, or
+// -EIO on malformed package data.
+static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry)
+{
+	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+	union acpi_object *result;
+	union acpi_object *e0;
+	union acpi_object *e1;
+	union acpi_object *e2;
+	u64 device_addr = 0;
+	u8 bus, dev, fun;
+	int i;
+
+
+	result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
+			SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE);
+
+	if (IS_ERR_OR_NULL(result))
+		return result ? PTR_ERR(result) : -EIO;
+
+	// three entries per device: name, address, <integer>
+	for (i = 0; i + 2 < result->package.count; i += 3) {
+		e0 = &result->package.elements[i];
+		e1 = &result->package.elements[i + 1];
+		e2 = &result->package.elements[i + 2];
+
+		if (e0->type != ACPI_TYPE_STRING) {
+			ACPI_FREE(result);
+			return -EIO;
+		}
+
+		if (e1->type != ACPI_TYPE_INTEGER) {
+			ACPI_FREE(result);
+			return -EIO;
+		}
+
+		if (e2->type != ACPI_TYPE_INTEGER) {
+			ACPI_FREE(result);
+			return -EIO;
+		}
+
+		if (strncmp(e0->string.pointer, entry, 64) == 0)
+			device_addr = e1->integer.value;
+	}
+
+	ACPI_FREE(result);
+	if (device_addr == 0)
+		return -ENODEV;
+
+
+	// convert address: bus in bits 20-27, device in 15-19, function in 12-14
+	bus = (device_addr & 0x0FF00000) >> 20;
+	dev = (device_addr & 0x000F8000) >> 15;
+	fun = (device_addr & 0x00007000) >> 12;
+
+	return bus << 8 | PCI_DEVFN(dev, fun);
+}
+
+// Look up the dGPU root-port pci_dev, using the _ADR path when the
+// hardware traits provide a PCI address string, the _DSM path otherwise.
+// Returns the pci_dev (reference held) or an ERR_PTR.
+static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *dev;
+	int addr;
+
+
+	if (drvdata->hardware_traits.dgpu_rp_pci_address) {
+		addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address);
+	} else {
+		addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP);
+	}
+
+	if (addr < 0)
+		return ERR_PTR(addr);
+
+	dev = pci_get_domain_bus_and_slot(0, (addr & 0xFF00) >> 8, addr & 0xFF);
+	return dev ? dev : ERR_PTR(-ENODEV);
+}
+
+
+// Read the "direct" dGPU power state from the dgpu_power GPIO.
+// Returns SHPS_DGPU_POWER_{OFF,ON} or a negative errno. Caller must
+// hold drvdata->lock (see the locked wrapper below).
+static int shps_dgpu_dsm_get_power_unlocked(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct gpio_desc *gpio = drvdata->gpio_dgpu_power;
+	int status;
+
+	status = gpiod_get_value_cansleep(gpio);
+	if (status < 0)
+		return status;
+
+	return status == 0 ? SHPS_DGPU_POWER_OFF : SHPS_DGPU_POWER_ON;
+}
+
+// Locked wrapper around shps_dgpu_dsm_get_power_unlocked().
+static int shps_dgpu_dsm_get_power(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	mutex_lock(&drvdata->lock);
+	status = shps_dgpu_dsm_get_power_unlocked(pdev);
+	mutex_unlock(&drvdata->lock);
+
+	return status;
+}
+
+// Set the dGPU power state via the _DSM power function. The firmware
+// is expected to return a one-byte buffer containing 0 on success.
+static int __shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+	union acpi_object *result;
+	union acpi_object param;
+
+	dev_info(&pdev->dev, "setting dGPU direct power to \'%s\'\n", shps_dgpu_power_str(power));
+
+	param.type = ACPI_TYPE_INTEGER;
+	param.integer.value = power == SHPS_DGPU_POWER_ON;
+
+	result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION,
+			SHPS_DSM_GPU_POWER, &param, ACPI_TYPE_BUFFER);
+
+	if (IS_ERR_OR_NULL(result))
+		return result ? PTR_ERR(result) : -EIO;
+
+	// check for the expected result
+	if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
+		ACPI_FREE(result);
+		return -EIO;
+	}
+
+	ACPI_FREE(result);
+	return 0;
+}
+
+// As above, but validates the requested state and skips the _DSM call
+// if the GPIO already reports the desired state.
+static int shps_dgpu_dsm_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	int status;
+
+	if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
+		return -EINVAL;
+
+	status = shps_dgpu_dsm_get_power_unlocked(pdev);
+	if (status < 0)
+		return status;
+	if (status == power)
+		return 0;
+
+	return __shps_dgpu_dsm_set_power_unlocked(pdev, power);
+}
+
+// Locked wrapper around shps_dgpu_dsm_set_power_unlocked().
+static int shps_dgpu_dsm_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	mutex_lock(&drvdata->lock);
+	status = shps_dgpu_dsm_set_power_unlocked(pdev, power);
+	mutex_unlock(&drvdata->lock);
+
+	return status;
+}
+
+
+// Check whether the root-port PCIe link is up: either the data link
+// layer reports it active (LNKSTA) or the slot reports a device
+// present (SLTSTA). Registers default to 0 if the reads fail.
+static bool shps_rp_link_up(struct pci_dev *rp)
+{
+	u16 lnksta = 0, sltsta = 0;
+
+	pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
+	pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
+
+	return (lnksta & PCI_EXP_LNKSTA_DLLLA) || (sltsta & PCI_EXP_SLTSTA_PDS);
+}
+
+
+// Derive the logical dGPU power state from the root port's PCI power
+// state: D3hot/D3cold count as off, error/unknown PCI states map to
+// SHPS_DGPU_POWER_UNKNOWN, everything else counts as on.
+static int shps_dgpu_rp_get_power_unlocked(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *rp = drvdata->dgpu_root_port;
+
+	switch (rp->current_state) {
+	case PCI_D3hot:
+	case PCI_D3cold:
+		return SHPS_DGPU_POWER_OFF;
+	case PCI_UNKNOWN:
+	case PCI_POWER_ERROR:
+		return SHPS_DGPU_POWER_UNKNOWN;
+	default:
+		return SHPS_DGPU_POWER_ON;
+	}
+}
+
+// Locked wrapper around shps_dgpu_rp_get_power_unlocked().
+static int shps_dgpu_rp_get_power(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	mutex_lock(&drvdata->lock);
+	status = shps_dgpu_rp_get_power_unlocked(pdev);
+	mutex_unlock(&drvdata->lock);
+
+	return status;
+}
+
+// Power the dGPU root port on or off. The statement order here is
+// deliberate and hardware-sensitive; do not reorder.
+//
+// Power-on: set D0, restore any saved root-port config state, re-enable
+// the device and bus mastering. SHPS_STATE_BIT_RPPWRON_SYNC is held
+// around the transition so the power-on notification handler
+// (shps_dgpu_powered_on) can tell a driver-initiated power-up from an
+// unexpected one.
+//
+// Power-off: save the root-port config state, "remove" the dGPU via the
+// _DSM first, wait (up to ~1s) for the link/slot status to drop, then
+// disable the port and put it into D3cold.
+static int __shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *rp = drvdata->dgpu_root_port;
+	int status, i;
+
+	dev_info(&pdev->dev, "setting dGPU power state to \'%s\'\n", shps_dgpu_power_str(power));
+
+	dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.1");
+	if (power == SHPS_DGPU_POWER_ON) {
+		set_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
+		pci_set_power_state(rp, PCI_D0);
+
+		if (drvdata->dgpu_root_port_state)
+			pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
+
+		pci_restore_state(rp);
+
+		if (!pci_is_enabled(rp))
+			pci_enable_device(rp);
+
+		pci_set_master(rp);
+		clear_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state);
+
+		set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	} else {
+		if (!drvdata->dgpu_root_port_state) {
+			pci_save_state(rp);
+			drvdata->dgpu_root_port_state = pci_store_saved_state(rp);
+		}
+
+		/*
+		 * To properly update the hot-plug system we need to "remove" the dGPU
+		 * before disabling it and sending it to D3cold. Following this, we
+		 * need to wait for the link and slot status to actually change.
+		 */
+		status = shps_dgpu_dsm_set_power_unlocked(pdev, SHPS_DGPU_POWER_OFF);
+		if (status)
+			return status;
+
+		// poll for up to 20 * 50ms for the link to drop
+		for (i = 0; i < 20 && shps_rp_link_up(rp); i++)
+			msleep(50);
+
+		if (shps_rp_link_up(rp))
+			dev_err(&pdev->dev, "dGPU removal via DSM timed out\n");
+
+		pci_clear_master(rp);
+
+		if (pci_is_enabled(rp))
+			pci_disable_device(rp);
+
+		pci_set_power_state(rp, PCI_D3cold);
+
+		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	}
+	dbg_dump_drvsta(pdev, "__shps_dgpu_rp_set_power_unlocked.2");
+
+	return 0;
+}
+
+// Validate the requested state and skip the transition if the root
+// port already reports the desired power state.
+static int shps_dgpu_rp_set_power_unlocked(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	int status;
+
+	if (power != SHPS_DGPU_POWER_ON && power != SHPS_DGPU_POWER_OFF)
+		return -EINVAL;
+
+	status = shps_dgpu_rp_get_power_unlocked(pdev);
+	if (status < 0)
+		return status;
+	if (status == power)
+		return 0;
+
+	return __shps_dgpu_rp_set_power_unlocked(pdev, power);
+}
+
+// Locked wrapper around shps_dgpu_rp_set_power_unlocked().
+static int shps_dgpu_rp_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	mutex_lock(&drvdata->lock);
+	status = shps_dgpu_rp_set_power_unlocked(pdev, power);
+	mutex_unlock(&drvdata->lock);
+
+	return status;
+}
+
+
+// High-level power switch: when dtx_latch is enabled, the DTX base
+// latch is locked before powering on (so the base cannot be detached
+// while the dGPU is powered) and unlocked after powering off. On a
+// failed power-on the latch is unlocked again to restore the old state.
+static int shps_dgpu_set_power(struct platform_device *pdev, enum shps_dgpu_power power)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	if (!param_dtx_latch)
+		return shps_dgpu_rp_set_power(pdev, power);
+
+	if (power == SHPS_DGPU_POWER_ON) {
+		status = ssam_bas_latch_lock(drvdata->ctrl);
+		if (status)
+			return status;
+
+		status = shps_dgpu_rp_set_power(pdev, power);
+		if (status)
+			ssam_bas_latch_unlock(drvdata->ctrl);
+
+	} else {
+		status = shps_dgpu_rp_set_power(pdev, power);
+		if (status)
+			return status;
+
+		status = ssam_bas_latch_unlock(drvdata->ctrl);
+	}
+
+	return status;
+}
+
+
+// Query the dGPU-presence GPIO: >0 if a dGPU is physically attached,
+// 0 if not, negative errno on failure.
+static int shps_dgpu_is_present(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+
+	return gpiod_get_value_cansleep(drvdata->gpio_dgpu_presence);
+}
+
+
+// sysfs "dgpu_power": shows the root-port-derived power state; writing
+// a boolean switches the dGPU via the full (latch-aware) path.
+static ssize_t dgpu_power_show(struct device *dev, struct device_attribute *attr, char *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int power = shps_dgpu_rp_get_power(pdev);
+
+	if (power < 0)
+		return power;
+
+	return sprintf(data, "%s\n", shps_dgpu_power_str(power));
+}
+
+static ssize_t dgpu_power_store(struct device *dev, struct device_attribute *attr,
+				const char *data, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	enum shps_dgpu_power power;
+	bool b = false;
+	int status;
+
+	status = kstrtobool(data, &b);
+	if (status)
+		return status;
+
+	// refuse power changes when no dGPU is physically attached
+	status = shps_dgpu_is_present(pdev);
+	if (status <= 0)
+		return status < 0 ? status : -EPERM;
+
+	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
+	status = shps_dgpu_set_power(pdev, power);
+
+	return status < 0 ? status : count;
+}
+
+// sysfs "dgpu_power_dsm": same interface but reads/writes the "direct"
+// (GPIO/_DSM) power state, bypassing root-port handling and the latch.
+static ssize_t dgpu_power_dsm_show(struct device *dev, struct device_attribute *attr, char *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	int power = shps_dgpu_dsm_get_power(pdev);
+
+	if (power < 0)
+		return power;
+
+	return sprintf(data, "%s\n", shps_dgpu_power_str(power));
+}
+
+static ssize_t dgpu_power_dsm_store(struct device *dev, struct device_attribute *attr,
+				    const char *data, size_t count)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	enum shps_dgpu_power power;
+	bool b = false;
+	int status;
+
+	status = kstrtobool(data, &b);
+	if (status)
+		return status;
+
+	// refuse power changes when no dGPU is physically attached
+	status = shps_dgpu_is_present(pdev);
+	if (status <= 0)
+		return status < 0 ? status : -EPERM;
+
+	power = b ? SHPS_DGPU_POWER_ON : SHPS_DGPU_POWER_OFF;
+	status = shps_dgpu_dsm_set_power(pdev, power);
+
+	return status < 0 ? status : count;
+}
+
+static DEVICE_ATTR_RW(dgpu_power);
+static DEVICE_ATTR_RW(dgpu_power_dsm);
+
+static struct attribute *shps_power_attrs[] = {
+	&dev_attr_dgpu_power.attr,
+	&dev_attr_dgpu_power_dsm.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(shps_power);
+
+
+// Debug helper: log both the root-port-derived and the direct
+// (GPIO/_DSM) dGPU power states.
+static void dbg_dump_power_states(struct platform_device *pdev, const char *prefix)
+{
+	enum shps_dgpu_power power_dsm;
+	enum shps_dgpu_power power_rp;
+	int status;
+
+	status = shps_dgpu_rp_get_power_unlocked(pdev);
+	if (status < 0)
+		dev_err(&pdev->dev, "%s: failed to get root-port power state: %d\n", prefix, status);
+	power_rp = status;
+
+	// Query the direct (DSM/GPIO) state here — previously this called
+	// the root-port getter a second time and logged it as "direct".
+	status = shps_dgpu_dsm_get_power_unlocked(pdev);
+	if (status < 0)
+		dev_err(&pdev->dev, "%s: failed to get direct power state: %d\n", prefix, status);
+	power_dsm = status;
+
+	dev_dbg(&pdev->dev, "%s: root-port power state: %d\n", prefix, power_rp);
+	dev_dbg(&pdev->dev, "%s: direct power state: %d\n", prefix, power_dsm);
+}
+
+// Debug helper: dump the root-port PCIe link and slot status registers.
+// Initialize to 0 so a failed capability read does not log stack
+// garbage (consistent with shps_rp_link_up()).
+static void dbg_dump_pciesta(struct platform_device *pdev, const char *prefix)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *rp = drvdata->dgpu_root_port;
+	u16 lnksta = 0, lnksta2 = 0, sltsta = 0, sltsta2 = 0;
+
+	pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
+	pcie_capability_read_word(rp, PCI_EXP_LNKSTA2, &lnksta2);
+	pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta);
+	pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2);
+
+	dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta);
+	dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2);
+	dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta);
+	dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2);
+}
+
+// Debug helper: dump driver-tracked root-port state.
+static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *rp = drvdata->dgpu_root_port;
+
+	dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state);
+	dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved);
+	dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state);
+	dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt));
+	dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster);
+}
+
+// PM prepare: optionally force the dGPU into the configured suspend
+// power state. The pre-suspend power target (PWRTGT) is saved and
+// restored so shps_pm_complete() can re-establish it on resume.
+static int shps_pm_prepare(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	bool pwrtgt;
+	int status = 0;
+
+	dbg_dump_power_states(pdev, "shps_pm_prepare");
+
+	if (param_dgpu_power_susp != SHPS_DGPU_MP_POWER_ASIS) {
+		pwrtgt = test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+
+		status = shps_dgpu_set_power(pdev, param_dgpu_power_susp);
+		if (status) {
+			dev_err(&pdev->dev, "failed to power %s dGPU: %d\n",
+				param_dgpu_power_susp == SHPS_DGPU_MP_POWER_OFF ? "off" : "on",
+				status);
+			return status;
+		}
+
+		// restore the power target recorded before the forced transition
+		if (pwrtgt)
+			set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+		else
+			clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	}
+
+	return 0;
+}
+
+// PM complete: re-synchronize the dGPU power state after resume. If the
+// dGPU was detached while suspended, drop the power target; if the
+// target is "off", power the (resume-powered) dGPU back down.
+static void shps_pm_complete(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	dbg_dump_power_states(pdev, "shps_pm_complete");
+	dbg_dump_pciesta(pdev, "shps_pm_complete");
+	dbg_dump_drvsta(pdev, "shps_pm_complete.1");
+
+	// update power target, dGPU may have been detached while suspended
+	status = shps_dgpu_is_present(pdev);
+	if (status < 0) {
+		dev_err(&pdev->dev, "failed to get dGPU presence: %d\n", status);
+		return;
+	} else if (status == 0) {
+		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	}
+
+	/*
+	 * During resume, the PCIe core will power on the root-port, which in turn
+	 * will power on the dGPU. Most of the state synchronization is already
+	 * handled via the SAN RQSG handler, so it is in a fully consistent
+	 * on-state here. If requested, turn it off here.
+	 *
+	 * As there seem to be some synchronization issues turning off the dGPU
+	 * directly after the power-on SAN RQSG notification during the resume
+	 * process, let's do this here.
+	 *
+	 * TODO/FIXME:
+	 *   This does not combat unhandled power-ons when the device is not fully
+	 *   resumed, i.e. re-suspended before shps_pm_complete is called. Those
+	 *   should normally not be an issue, but the dGPU does get hot even though
+	 *   it is suspended, so ideally we want to keep it off.
+	 */
+	if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
+		status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
+		if (status)
+			dev_err(&pdev->dev, "failed to power-off dGPU: %d\n", status);
+	}
+
+	dbg_dump_drvsta(pdev, "shps_pm_complete.2");
+}
+
+// PM suspend: arm the base-presence GPIO IRQ as a wakeup source if the
+// device may wake the system; remember that via WAKE_ENABLED so resume
+// only disarms what was actually armed.
+static int shps_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	if (device_may_wakeup(dev)) {
+		status = enable_irq_wake(drvdata->irq_base_presence);
+		if (status)
+			return status;
+
+		set_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state);
+	}
+
+	return 0;
+}
+
+// PM resume: disarm the wakeup IRQ if (and only if) suspend armed it.
+static int shps_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status = 0;
+
+	if (test_and_clear_bit(SHPS_STATE_BIT_WAKE_ENABLED, &drvdata->state))
+		status = disable_irq_wake(drvdata->irq_base_presence);
+
+	return status;
+}
+
+static void shps_shutdown(struct platform_device *pdev)
+{
+	int status;
+
+	/*
+	 * Turn on dGPU before shutting down. This allows the core drivers to
+	 * properly shut down the device. If we don't do this, the pcieport driver
+	 * will complain that the device has already been disabled.
+	 */
+	status = shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_ON);
+	if (status)
+		dev_err(&pdev->dev, "failed to turn on dGPU: %d\n", status);
+}
+
+// dGPU physically detached (base removed): power the root port down.
+static int shps_dgpu_detached(struct platform_device *pdev)
+{
+	dbg_dump_power_states(pdev, "shps_dgpu_detached");
+	return shps_dgpu_set_power(pdev, SHPS_DGPU_POWER_OFF);
+}
+
+// dGPU physically attached: currently only logs state; the dGPU stays
+// in whatever power state the firmware left it in.
+static int shps_dgpu_attached(struct platform_device *pdev)
+{
+	dbg_dump_power_states(pdev, "shps_dgpu_attached");
+	return 0;
+}
+
+static int shps_dgpu_powered_on(struct platform_device *pdev)
+{
+	/*
+	 * This function gets called directly after a power-state transition of
+	 * the dGPU root port out of D3cold state, indicating a power-on of the
+	 * dGPU. Specifically, this function is called from the RQSG handler of
+	 * SAN, invoked by the ACPI _ON method of the dGPU root port. This means
+	 * that this function is run inside `pci_set_power_state(rp, ...)`
+	 * synchronously and thus returns before the `pci_set_power_state` call
+	 * does.
+	 *
+	 * `pci_set_power_state` may either be called by us or when the PCI
+	 * subsystem decides to power up the root port (e.g. during resume). Thus
+	 * we should use this function to ensure that the dGPU and root port
+	 * states are consistent when an unexpected power-up is encountered.
+	 */
+
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct pci_dev *rp = drvdata->dgpu_root_port;
+	int status;
+
+	dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.1");
+
+	// if we caused the root port to power-on, return
+	if (test_bit(SHPS_STATE_BIT_RPPWRON_SYNC, &drvdata->state))
+		return 0;
+
+	// if dGPU is not present, force power-target to off and return
+	status = shps_dgpu_is_present(pdev);
+	if (status == 0)
+		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	if (status <= 0)
+		return status;
+
+	mutex_lock(&drvdata->lock);
+
+	// restore saved root-port state and re-enable the device
+	dbg_dump_power_states(pdev, "shps_dgpu_powered_on.1");
+	dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.1");
+	if (drvdata->dgpu_root_port_state)
+		pci_load_and_free_saved_state(rp, &drvdata->dgpu_root_port_state);
+	pci_restore_state(rp);
+	if (!pci_is_enabled(rp))
+		pci_enable_device(rp);
+	pci_set_master(rp);
+	dbg_dump_drvsta(pdev, "shps_dgpu_powered_on.2");
+	dbg_dump_power_states(pdev, "shps_dgpu_powered_on.2");
+	dbg_dump_pciesta(pdev, "shps_dgpu_powered_on.2");
+
+	mutex_unlock(&drvdata->lock);
+
+	if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) {
+		dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n");
+		// TODO: schedule state re-check and update
+	}
+
+	return 0;
+}
+
+// SAN RQSG callback (gen-1 notification path): dispatch dGPU power-on
+// requests to shps_dgpu_powered_on(); log and ignore anything else.
+static int shps_dgpu_handle_rqsg(struct surface_sam_san_rqsg *rqsg, void *data)
+{
+	struct platform_device *pdev = data;
+
+	if (rqsg->tc == SAM_DGPU_TC && rqsg->cid == SAM_DGPU_CID_POWERON)
+		return shps_dgpu_powered_on(pdev);
+
+	dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
+		 rqsg->tc, rqsg->cid, rqsg->iid);
+	return 0;
+}
+
+// Threaded IRQ handler for the dGPU-presence GPIO: read the new
+// presence state and run the attach/detach handling.
+static irqreturn_t shps_dgpu_presence_irq(int irq, void *data)
+{
+	struct platform_device *pdev = data;
+	bool dgpu_present;
+	int status;
+
+	status = shps_dgpu_is_present(pdev);
+	if (status < 0) {
+		dev_err(&pdev->dev, "failed to check physical dGPU presence: %d\n", status);
+		return IRQ_HANDLED;
+	}
+
+	dgpu_present = status != 0;
+	dev_info(&pdev->dev, "dGPU physically %s\n", dgpu_present ? "attached" : "detached");
+
+	if (dgpu_present)
+		status = shps_dgpu_attached(pdev);
+	else
+		status = shps_dgpu_detached(pdev);
+
+	if (status)
+		dev_err(&pdev->dev, "error handling dGPU interrupt: %d\n", status);
+
+	return IRQ_HANDLED;
+}
+
+// Base-presence IRQ exists only as a system wakeup source.
+static irqreturn_t shps_base_presence_irq(int irq, void *data)
+{
+	return IRQ_HANDLED; // nothing to do, just wake
+}
+
+
+// Acquire the three GPIOs (dGPU power, dGPU presence, base presence),
+// export them and create sysfs links on the platform device. Uses a
+// goto-cleanup chain that unwinds in reverse acquisition order; the
+// gpiods themselves are devm-managed.
+static int shps_gpios_setup(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	struct gpio_desc *gpio_dgpu_power;
+	struct gpio_desc *gpio_dgpu_presence;
+	struct gpio_desc *gpio_base_presence;
+	int status;
+
+	// get GPIOs
+	gpio_dgpu_power = devm_gpiod_get(&pdev->dev, "dgpu_power", GPIOD_IN);
+	if (IS_ERR(gpio_dgpu_power)) {
+		status = PTR_ERR(gpio_dgpu_power);
+		goto err_out;
+	}
+
+	gpio_dgpu_presence = devm_gpiod_get(&pdev->dev, "dgpu_presence", GPIOD_IN);
+	if (IS_ERR(gpio_dgpu_presence)) {
+		status = PTR_ERR(gpio_dgpu_presence);
+		goto err_out;
+	}
+
+	gpio_base_presence = devm_gpiod_get(&pdev->dev, "base_presence", GPIOD_IN);
+	if (IS_ERR(gpio_base_presence)) {
+		status = PTR_ERR(gpio_base_presence);
+		goto err_out;
+	}
+
+	// export GPIOs
+	status = gpiod_export(gpio_dgpu_power, false);
+	if (status)
+		goto err_out;
+
+	status = gpiod_export(gpio_dgpu_presence, false);
+	if (status)
+		goto err_export_dgpu_presence;
+
+	status = gpiod_export(gpio_base_presence, false);
+	if (status)
+		goto err_export_base_presence;
+
+	// create sysfs links
+	status = gpiod_export_link(&pdev->dev, "gpio-dgpu_power", gpio_dgpu_power);
+	if (status)
+		goto err_link_dgpu_power;
+
+	status = gpiod_export_link(&pdev->dev, "gpio-dgpu_presence", gpio_dgpu_presence);
+	if (status)
+		goto err_link_dgpu_presence;
+
+	status = gpiod_export_link(&pdev->dev, "gpio-base_presence", gpio_base_presence);
+	if (status)
+		goto err_link_base_presence;
+
+	drvdata->gpio_dgpu_power = gpio_dgpu_power;
+	drvdata->gpio_dgpu_presence = gpio_dgpu_presence;
+	drvdata->gpio_base_presence = gpio_base_presence;
+	return 0;
+
+err_link_base_presence:
+	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
+err_link_dgpu_presence:
+	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
+err_link_dgpu_power:
+	gpiod_unexport(gpio_base_presence);
+err_export_base_presence:
+	gpiod_unexport(gpio_dgpu_presence);
+err_export_dgpu_presence:
+	gpiod_unexport(gpio_dgpu_power);
+err_out:
+	return status;
+}
+
+// Undo shps_gpios_setup(): remove sysfs links and unexport the GPIOs.
+static void shps_gpios_remove(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+
+	sysfs_remove_link(&pdev->dev.kobj, "gpio-base_presence");
+	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_presence");
+	sysfs_remove_link(&pdev->dev.kobj, "gpio-dgpu_power");
+	gpiod_unexport(drvdata->gpio_base_presence);
+	gpiod_unexport(drvdata->gpio_dgpu_presence);
+	gpiod_unexport(drvdata->gpio_dgpu_power);
+}
+
+// Map the presence GPIOs to IRQs and install handlers: a plain handler
+// for base presence (wakeup only) and a threaded handler for dGPU
+// presence (its handler sleeps, e.g. gpiod_get_value_cansleep).
+static int shps_gpios_setup_irq(struct platform_device *pdev)
+{
+	const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+	const int irqf_base = IRQF_SHARED;
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	status = gpiod_to_irq(drvdata->gpio_base_presence);
+	if (status < 0)
+		return status;
+	drvdata->irq_base_presence = status;
+
+	status = gpiod_to_irq(drvdata->gpio_dgpu_presence);
+	if (status < 0)
+		return status;
+	drvdata->irq_dgpu_presence = status;
+
+	status = request_irq(drvdata->irq_base_presence,
+			     shps_base_presence_irq, irqf_base,
+			     "shps_base_presence_irq", pdev);
+	if (status) {
+		dev_err(&pdev->dev, "base irq failed: %d\n", status);
+		return status;
+	}
+
+	status = request_threaded_irq(drvdata->irq_dgpu_presence,
+				      NULL, shps_dgpu_presence_irq, irqf_dgpu,
+				      "shps_dgpu_presence_irq", pdev);
+	if (status) {
+		// undo the base-presence request on failure
+		free_irq(drvdata->irq_base_presence, pdev);
+		return status;
+	}
+
+	return 0;
+}
+
+// Release both presence IRQs.
+static void shps_gpios_remove_irq(struct platform_device *pdev)
+{
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+
+	free_irq(drvdata->irq_base_presence, pdev);
+	free_irq(drvdata->irq_dgpu_presence, pdev);
+}
+
+// ACPI notify handler for \_SB.SGPC (gen-2 notification path): forward
+// dGPU power-on notifications to shps_dgpu_powered_on(); ignore all
+// other notification values.
+static void shps_sgcp_notify(acpi_handle device, u32 value, void *context)
+{
+	struct platform_device *pdev = context;
+
+	switch (value) {
+	case ACPI_SGCP_NOTIFY_POWER_ON:
+		shps_dgpu_powered_on(pdev);
+		break;
+	default:
+		break;
+	}
+}
+
+// Install the \_SB.SGPC notify handler (gen-2 path). On success stores
+// the handle in *sgpc_handle; on failure stores NULL. Returns 0 or a
+// negative errno — ACPICA returns acpi_status (positive codes), which
+// must not be propagated to the caller as if it were an errno.
+static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle)
+{
+	acpi_handle handle;
+	acpi_status status;
+
+	status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&pdev->dev, "error in get_handle %d\n", status);
+		*sgpc_handle = NULL;
+		return -ENXIO;
+	}
+
+	status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev);
+	if (ACPI_FAILURE(status)) {
+		dev_err(&pdev->dev, "error in install notify %d\n", status);
+		*sgpc_handle = NULL;
+		return -EFAULT;
+	}
+
+	*sgpc_handle = handle;
+	return 0;
+}
+
+// Remove the \_SB.SGPC notify handler if one was installed (the handle
+// is NULL on gen-1 hardware or after a failed install).
+static void shps_remove_sgcp_notification(struct platform_device *pdev) {
+	int status;
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->sgpc_handle) {
+		status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify);
+		if (status) {
+			dev_err(&pdev->dev, "failed to remove notify handler: %d\n", status);
+		}
+	}
+}
+
+// Walk the probe table and return the traits of the first matching
+// ACPI device; the terminating NULL entry selects the gen-1 defaults.
+static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev) {
+	const struct shps_hardware_probe *p;
+
+	for (p = shps_hardware_probe_match; p->hardware_id; ++p) {
+		if (acpi_dev_present(p->hardware_id, NULL, -1)) {
+			break;
+		}
+	}
+
+	dev_info(&pdev->dev,
+		"shps_detect_hardware_traits found device %s, generation %d\n",
+		p->hardware_id ? p->hardware_id : "SAN (default)",
+		p->generation);
+
+	return *p->hardware_traits;
+}
+
+/*
+ * Probe the Surface hot-plug system (SHPS) platform device: bind to the
+ * SSAM controller, detect hardware traits, set up GPIOs/IRQs, sysfs
+ * attributes and notification handlers, then apply the initial dGPU
+ * power state. Cleanup on failure mirrors setup in reverse order.
+ */
+static int shps_probe(struct platform_device *pdev)
+{
+	struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
+	struct shps_driver_data *drvdata;
+	struct ssam_controller *ctrl;
+	struct device_link *link;
+	int power, status;
+	struct shps_hardware_traits detected_traits;
+
+	if (gpiod_count(&pdev->dev, NULL) < 0) {
+		dev_err(&pdev->dev, "gpiod_count returned < 0\n");
+		return -ENODEV;
+	}
+
+	// link to SSH; -ENXIO means the controller is not up yet, so defer
+	status = ssam_client_bind(&pdev->dev, &ctrl);
+	if (status) {
+		return status == -ENXIO ? -EPROBE_DEFER : status;
+	}
+
+	// detect what kind of hardware we're running
+	detected_traits = shps_detect_hardware_traits(pdev);
+
+	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
+		// link to SAN
+		status = surface_sam_san_consumer_register(&pdev->dev, 0);
+		if (status) {
+			dev_err(&pdev->dev, "failed to register with san consumer: %d\n", status);
+			return status == -ENXIO ? -EPROBE_DEFER : status;
+		}
+	}
+
+	status = acpi_dev_add_driver_gpios(shps_dev, shps_acpi_gpios);
+	if (status) {
+		dev_err(&pdev->dev, "failed to add gpios: %d\n", status);
+		return status;
+	}
+
+	drvdata = kzalloc(sizeof(struct shps_driver_data), GFP_KERNEL);
+	if (!drvdata) {
+		status = -ENOMEM;
+		goto err_drvdata;
+	}
+	mutex_init(&drvdata->lock);
+	platform_set_drvdata(pdev, drvdata);
+
+	drvdata->ctrl = ctrl;
+	drvdata->hardware_traits = detected_traits;
+
+	drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev);
+	if (IS_ERR(drvdata->dgpu_root_port)) {
+		status = PTR_ERR(drvdata->dgpu_root_port);
+		dev_err(&pdev->dev, "failed to get pci dev: %d\n", status);
+		goto err_rp_lookup;
+	}
+
+	status = shps_gpios_setup(pdev);
+	if (status) {
+		dev_err(&pdev->dev, "unable to set up gpios, %d\n", status);
+		goto err_gpio;
+	}
+
+	status = shps_gpios_setup_irq(pdev);
+	if (status) {
+		dev_err(&pdev->dev, "unable to set up irqs %d\n", status);
+		goto err_gpio_irqs;
+	}
+
+	status = device_add_groups(&pdev->dev, shps_power_groups);
+	if (status)
+		goto err_devattr;
+
+	link = device_link_add(&pdev->dev, &drvdata->dgpu_root_port->dev,
+			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
+	if (!link) {
+		// fix: previously jumped to cleanup with status still 0,
+		// making probe report success after tearing everything down
+		status = -EINVAL;
+		goto err_devlink;
+	}
+
+	// install the notification mechanism matching the detected hardware
+	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
+		status = surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev);
+		if (status) {
+			dev_err(&pdev->dev, "unable to set SAN notification handler (%d)\n", status);
+			goto err_devlink;
+		}
+	} else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
+		status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle);
+		if (status) {
+			dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status);
+			goto err_devlink;
+		}
+	}
+
+	// if dGPU is not present turn-off root-port, else obey module param
+	status = shps_dgpu_is_present(pdev);
+	if (status < 0)
+		goto err_post_notification;
+
+	power = status == 0 ? SHPS_DGPU_POWER_OFF : param_dgpu_power_init;
+	if (power != SHPS_DGPU_MP_POWER_ASIS) {
+		status = shps_dgpu_set_power(pdev, power);
+		if (status)
+			goto err_post_notification;
+	}
+
+	// initialize power target from the root port's current state
+	status = shps_dgpu_rp_get_power(pdev);
+	if (status < 0)
+		goto err_pwrtgt;
+
+	if (status)
+		set_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+	else
+		clear_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state);
+
+	device_init_wakeup(&pdev->dev, true);
+	return 0;
+
+err_pwrtgt:
+	if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
+		status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
+		if (status)
+			dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
+	}
+err_post_notification:
+	if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
+		shps_remove_sgcp_notification(pdev);
+	} else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
+		surface_sam_san_set_rqsg_handler(NULL, NULL);
+	}
+err_devlink:
+	device_remove_groups(&pdev->dev, shps_power_groups);
+err_devattr:
+	shps_gpios_remove_irq(pdev);
+err_gpio_irqs:
+	shps_gpios_remove(pdev);
+err_gpio:
+	pci_dev_put(drvdata->dgpu_root_port);
+err_rp_lookup:
+	platform_set_drvdata(pdev, NULL);
+	kfree(drvdata);
+err_drvdata:
+	acpi_dev_remove_driver_gpios(shps_dev);
+	return status;
+}
+
+/*
+ * Remove callback: restore the module-configured exit power state, then
+ * tear down notification handlers, sysfs groups, IRQs and GPIOs, and
+ * release all driver resources (mirror of shps_probe() in reverse).
+ */
+static int shps_remove(struct platform_device *pdev)
+{
+	struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev);
+	struct shps_driver_data *drvdata = platform_get_drvdata(pdev);
+	int status;
+
+	// obey the module parameter for the dGPU power state on driver exit
+	if (param_dgpu_power_exit != SHPS_DGPU_MP_POWER_ASIS) {
+		status = shps_dgpu_set_power(pdev, param_dgpu_power_exit);
+		if (status)
+			dev_err(&pdev->dev, "failed to set dGPU power state: %d\n", status);
+	}
+
+	device_set_wakeup_capable(&pdev->dev, false);
+
+	// remove the notification mechanism matching the detected hardware
+	if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) {
+		shps_remove_sgcp_notification(pdev);
+	} else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) {
+		surface_sam_san_set_rqsg_handler(NULL, NULL);
+	}
+	device_remove_groups(&pdev->dev, shps_power_groups);
+	shps_gpios_remove_irq(pdev);
+	shps_gpios_remove(pdev);
+	pci_dev_put(drvdata->dgpu_root_port);
+	platform_set_drvdata(pdev, NULL);
+	kfree(drvdata);
+
+	acpi_dev_remove_driver_gpios(shps_dev);
+	return 0;
+}
+
+
+/* Power-management callbacks; implementations are earlier in this file. */
+static const struct dev_pm_ops shps_pm_ops = {
+	.prepare = shps_pm_prepare,
+	.complete = shps_pm_complete,
+	.suspend = shps_pm_suspend,
+	.resume = shps_pm_resume,
+};
+
+/* ACPI ID of the Surface hot-plug system device. */
+static const struct acpi_device_id shps_acpi_match[] = {
+	{ "MSHW0153", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, shps_acpi_match);
+
+static struct platform_driver surface_sam_hps = {
+	.probe = shps_probe,
+	.remove = shps_remove,
+	.shutdown = shps_shutdown,
+	.driver = {
+		.name = "surface_dgpu_hps",
+		.acpi_match_table = shps_acpi_match,
+		.pm = &shps_pm_ops,
+	},
+};
+
+module_platform_driver(surface_sam_hps);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Hot-Plug System (HPS) and dGPU power-state Driver for Surface Book 2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_san.c b/drivers/misc/surface_sam/clients/surface_sam_san.c
new file mode 100644
index 0000000000000..99a5401d7d58a
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_san.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface ACPI Notify (SAN) and ACPI integration driver for SAM.
+ * Translates communication from ACPI to SSH and back.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/surface_aggregator_module.h>
+#include "surface_sam_san.h"
+
+
+#define SAN_RQST_RETRY 5
+
+#define SAN_DSM_REVISION 0
+#define SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT 0x09
+
+static const guid_t SAN_DSM_UUID =
+ GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
+ 0x48, 0x7c, 0x91, 0xab, 0x3c);
+
+#define SAM_EVENT_DELAY_PWR_ADAPTER msecs_to_jiffies(5000)
+#define SAM_EVENT_DELAY_PWR_BST msecs_to_jiffies(2500)
+
+#define SAM_EVENT_PWR_CID_BIX 0x15
+#define SAM_EVENT_PWR_CID_BST 0x16
+#define SAM_EVENT_PWR_CID_ADAPTER 0x17
+#define SAM_EVENT_PWR_CID_DPTF 0x4f
+
+#define SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT 0x0b
+
+
+// Entry describing an ACPI device that consumes (depends on) the SAN device.
+struct san_acpi_consumer {
+	char *path;	// absolute ACPI path of the consumer device
+	bool required;	// probe fails if this consumer cannot be resolved
+	u32 flags;	// device_link_add() flags for the link
+};
+
+struct san_handler_data {
+	struct acpi_connection_info info;	// must be first
+};
+
+// A single established consumer device link plus its describing table entry.
+struct san_consumer_link {
+	const struct san_acpi_consumer *properties;
+	struct device_link *link;
+};
+
+struct san_consumers {
+	u32 num;
+	struct san_consumer_link *links;
+};
+
+// Per-device driver state for the SAN platform device.
+struct san_data {
+	struct device *dev;
+	struct ssam_controller *ctrl;
+
+	struct san_handler_data context;	// opregion handler context
+	struct san_consumers consumers;
+
+	struct ssam_event_notifier nf_bat;	// battery/power event notifier
+	struct ssam_event_notifier nf_tmp;	// thermal event notifier
+};
+
+#define to_san_data(ptr, member) \
+	container_of(ptr, struct san_data, member)
+
+// Deferred event delivery: the event payload is copied into the trailing
+// storage allocated past this struct, hence 'event' must be last.
+struct san_event_work {
+	struct delayed_work work;
+	struct device *dev;
+	struct ssam_event event;	// must be last
+};
+
+struct gsb_data_in {
+	u8 cv;	// command value, selects RQST/ETWL/RQSG handling
+} __packed;
+
+struct gsb_data_rqsx {
+	u8 cv;		// command value (should be 0x01 or 0x03)
+	u8 tc;		// target controller
+	u8 tid;		// transport channel ID
+	u8 iid;		// target sub-controller (e.g. primary vs. secondary battery)
+	u8 snc;		// expect-response-flag
+	u8 cid;		// command ID
+	u16 cdl;	// payload length
+	u8 pld[0];	// payload
+} __packed;
+
+struct gsb_data_etwl {
+	u8 cv;		// command value (should be 0x02)
+	u8 etw3;	// ?
+	u8 etw4;	// ?
+	u8 msg[0];	// error message (ASCIIZ)
+} __packed;
+
+struct gsb_data_out {
+	u8 status;	// _SSH communication status
+	u8 len;		// _SSH payload length
+	u8 pld[0];	// _SSH payload
+} __packed;
+
+union gsb_buffer_data {
+	struct gsb_data_in in;		// common input
+	struct gsb_data_rqsx rqsx;	// RQSX input
+	struct gsb_data_etwl etwl;	// ETWL input
+	struct gsb_data_out out;	// output
+};
+
+struct gsb_buffer {
+	u8 status;			// GSB AttribRawProcess status
+	u8 len;				// GSB AttribRawProcess length
+	union gsb_buffer_data data;
+} __packed;
+
+#define SAN_GSB_MAX_RQSX_PAYLOAD  (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
+#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
+
+#define san_request_sync_onstack(ctrl, rqst, rsp) \
+	ssam_request_sync_onstack(ctrl, rqst, rsp, SAN_GSB_MAX_RQSX_PAYLOAD)
+
+
+// _DSM function indices used to signal power events to ACPI.
+enum san_pwr_event {
+	SAN_PWR_EVENT_BAT1_STAT	= 0x03,
+	SAN_PWR_EVENT_BAT1_INFO	= 0x04,
+	SAN_PWR_EVENT_ADP1_STAT	= 0x05,
+	SAN_PWR_EVENT_ADP1_INFO	= 0x06,
+	SAN_PWR_EVENT_BAT2_STAT	= 0x07,
+	SAN_PWR_EVENT_BAT2_INFO	= 0x08,
+	SAN_PWR_EVENT_DPTF	= 0x0A,
+};
+
+
+static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data);
+
+// Global RQSG interface state: at most one external handler may be
+// installed at a time; all access is serialized via 'lock'.
+struct sam_san_rqsg_if {
+	struct mutex lock;
+	struct device *san_dev;		// set once the SAN device has probed
+	surface_sam_san_rqsg_handler_fn handler;
+	void *handler_data;
+};
+
+static struct sam_san_rqsg_if rqsg_if = {
+	.lock = __MUTEX_INITIALIZER(rqsg_if.lock),
+	.san_dev = NULL,
+	.handler = sam_san_default_rqsg_handler,
+	.handler_data = NULL,
+};
+
+/*
+ * Register a device as consumer of the SAN device by creating a device
+ * link. Only DL_FLAG_PM_RUNTIME and DL_FLAG_RPM_ACTIVE may be passed in
+ * @flags; DL_FLAG_AUTOREMOVE_CONSUMER is added implicitly. Returns
+ * -ENXIO if the SAN device has not been probed yet, -EINVAL on invalid
+ * flags or link failure, 0 on success.
+ */
+int surface_sam_san_consumer_register(struct device *consumer, u32 flags)
+{
+	const u32 valid = DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE;
+	int status = -ENXIO;
+
+	if (flags & ~valid)
+		return -EINVAL;
+
+	flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
+
+	mutex_lock(&rqsg_if.lock);
+	if (rqsg_if.san_dev) {
+		if (device_link_add(consumer, rqsg_if.san_dev, flags))
+			status = 0;
+		else
+			status = -EINVAL;
+	}
+	mutex_unlock(&rqsg_if.lock);
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(surface_sam_san_consumer_register);
+
+/*
+ * Install the RQSG event handler. Only one non-default handler may be
+ * installed at a time; passing fn == NULL restores the default handler.
+ * Returns -EBUSY if another handler is already installed.
+ */
+int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data)
+{
+	int status = -EBUSY;
+
+	mutex_lock(&rqsg_if.lock);
+
+	if (!fn || rqsg_if.handler == sam_san_default_rqsg_handler) {
+		rqsg_if.handler = fn ? fn : sam_san_default_rqsg_handler;
+		rqsg_if.handler_data = fn ? data : NULL;
+		status = 0;
+	}
+
+	mutex_unlock(&rqsg_if.lock);
+	return status;
+}
+EXPORT_SYMBOL_GPL(surface_sam_san_set_rqsg_handler);
+
+/* Invoke the currently installed RQSG handler under the interface lock. */
+int san_call_rqsg_handler(struct surface_sam_san_rqsg *rqsg)
+{
+	int status;
+
+	mutex_lock(&rqsg_if.lock);
+	status = rqsg_if.handler(rqsg, rqsg_if.handler_data);
+	mutex_unlock(&rqsg_if.lock);
+
+	return status;
+}
+
+/* Fallback handler: log and ignore RQSG requests nobody has claimed. */
+static int sam_san_default_rqsg_handler(struct surface_sam_san_rqsg *rqsg, void *data)
+{
+	struct device *dev = rqsg_if.san_dev;
+
+	dev_warn(dev, "unhandled request: RQSG(0x%02x, 0x%02x, 0x%02x)\n",
+		 rqsg->tc, rqsg->cid, rqsg->iid);
+
+	return 0;
+}
+
+
+/* Check whether the SAN _DSM advertises the given function index. */
+static bool san_acpi_can_notify(struct device *dev, u64 func)
+{
+	acpi_handle san = ACPI_HANDLE(dev);
+	return acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func);
+}
+
+/*
+ * Signal a power event to ACPI by evaluating the corresponding SAN _DSM
+ * function. Returns 0 if the function is not advertised by the _DSM or
+ * the notification succeeded, a negative errno otherwise.
+ */
+static int san_acpi_notify_power_event(struct device *dev, enum san_pwr_event event)
+{
+	acpi_handle san = ACPI_HANDLE(dev);
+	union acpi_object *obj;
+
+	if (!san_acpi_can_notify(dev, event))
+		return 0;
+
+	dev_dbg(dev, "notify power event 0x%02x\n", event);
+	obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
+				      event, NULL, ACPI_TYPE_BUFFER);
+
+	if (IS_ERR_OR_NULL(obj))
+		return obj ? PTR_ERR(obj) : -ENXIO;
+
+	// a one-byte zero buffer indicates success
+	if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
+		dev_err(dev, "got unexpected result from _DSM\n");
+		ACPI_FREE(obj);		// fix: do not leak the _DSM result on error
+		return -EFAULT;
+	}
+
+	ACPI_FREE(obj);
+	return 0;
+}
+
+/*
+ * Forward a thermal sensor trip-point notification for sensor instance
+ * @iid to ACPI via the SAN _DSM. Returns 0 if the function is not
+ * advertised or the notification succeeded, a negative errno otherwise.
+ */
+static int san_acpi_notify_sensor_trip_point(struct device *dev, u8 iid)
+{
+	acpi_handle san = ACPI_HANDLE(dev);
+	union acpi_object *obj;
+	union acpi_object param;
+
+	if (!san_acpi_can_notify(dev, SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT))
+		return 0;
+
+	param.type = ACPI_TYPE_INTEGER;
+	param.integer.value = iid;
+
+	obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
+				      SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT,
+				      &param, ACPI_TYPE_BUFFER);
+
+	if (IS_ERR_OR_NULL(obj))
+		return obj ? PTR_ERR(obj) : -ENXIO;
+
+	// a one-byte zero buffer indicates success
+	if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
+		dev_err(dev, "got unexpected result from _DSM\n");
+		ACPI_FREE(obj);		// fix: do not leak the _DSM result on error
+		return -EFAULT;
+	}
+
+	ACPI_FREE(obj);
+	return 0;
+}
+
+
+/*
+ * Handle an adapter plug/unplug event: notify the adapter status change,
+ * then force battery status updates (see comment below).
+ */
+static inline int san_evt_power_adapter(struct device *dev, const struct ssam_event *event)
+{
+	int status;
+
+	status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_ADP1_STAT);
+	if (status)
+		return status;
+
+	/*
+	 * Ensure that the battery states get updated correctly.
+	 * When the battery is fully charged and an adapter is plugged in, it
+	 * sometimes is not updated correctly, instead showing it as charging.
+	 * Explicitly trigger battery updates to fix this.
+	 */
+
+	status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT1_STAT);
+	if (status)
+		return status;
+
+	return san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT);
+}
+
+/* Translate a battery-information (BIX) event to the matching _DSM call. */
+static inline int san_evt_power_bix(struct device *dev, const struct ssam_event *event)
+{
+	// instance 0x02 is the secondary battery, everything else maps to BAT1
+	const enum san_pwr_event evcode = (event->instance_id == 0x02)
+		? SAN_PWR_EVENT_BAT2_INFO : SAN_PWR_EVENT_BAT1_INFO;
+
+	return san_acpi_notify_power_event(dev, evcode);
+}
+
+/* Translate a battery-status (BST) event to the matching _DSM call. */
+static inline int san_evt_power_bst(struct device *dev, const struct ssam_event *event)
+{
+	// instance 0x02 is the secondary battery, everything else maps to BAT1
+	const enum san_pwr_event evcode = (event->instance_id == 0x02)
+		? SAN_PWR_EVENT_BAT2_STAT : SAN_PWR_EVENT_BAT1_STAT;
+
+	return san_acpi_notify_power_event(dev, evcode);
+}
+
+/*
+ * Forward a DPTF power event, passing the raw event payload through to
+ * the SAN _DSM as a buffer argument. Returns 0 if the function is not
+ * advertised or the notification succeeded, a negative errno otherwise.
+ */
+static inline int san_evt_power_dptf(struct device *dev, const struct ssam_event *event)
+{
+	union acpi_object payload;
+	acpi_handle san = ACPI_HANDLE(dev);
+	union acpi_object *obj;
+
+	if (!san_acpi_can_notify(dev, SAN_PWR_EVENT_DPTF))
+		return 0;
+
+	/*
+	 * The Surface ACPI expects a buffer and not a package. It specifically
+	 * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
+	 * acpica/nsarguments.c, but this can safely be ignored.
+	 */
+	payload.type = ACPI_TYPE_BUFFER;
+	payload.buffer.length = event->length;
+	payload.buffer.pointer = (u8 *)&event->data[0];
+
+	dev_dbg(dev, "notify power event 0x%02x\n", event->command_id);
+	obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
+				      SAN_PWR_EVENT_DPTF, &payload,
+				      ACPI_TYPE_BUFFER);
+
+	if (IS_ERR_OR_NULL(obj))
+		return obj ? PTR_ERR(obj) : -ENXIO;
+
+	// a one-byte zero buffer indicates success
+	if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
+		dev_err(dev, "got unexpected result from _DSM\n");
+		ACPI_FREE(obj);		// fix: do not leak the _DSM result on error
+		return -EFAULT;
+	}
+
+	ACPI_FREE(obj);
+	return 0;
+}
+
+/*
+ * Delay (in jiffies) before forwarding a power event with the given
+ * command ID. Adapter events wait for the battery state to settle first;
+ * BST events are delayed so that cached state does not hide changes.
+ * All other events (BIX, DPTF, ...) are delivered immediately.
+ */
+static unsigned long san_evt_power_delay(u8 cid)
+{
+	if (cid == SAM_EVENT_PWR_CID_ADAPTER)
+		return SAM_EVENT_DELAY_PWR_ADAPTER;
+
+	if (cid == SAM_EVENT_PWR_CID_BST)
+		return SAM_EVENT_DELAY_PWR_BST;
+
+	return 0;
+}
+
+/*
+ * Dispatch a SAM power event to the matching ACPI notification.
+ * Returns true if the command ID was recognized (notification failures
+ * are only logged), false for unknown command IDs.
+ */
+static bool san_evt_power(const struct ssam_event *event, struct device *dev)
+{
+	int status;
+
+	switch (event->command_id) {
+	case SAM_EVENT_PWR_CID_BIX:
+		status = san_evt_power_bix(dev, event);
+		break;
+
+	case SAM_EVENT_PWR_CID_BST:
+		status = san_evt_power_bst(dev, event);
+		break;
+
+	case SAM_EVENT_PWR_CID_ADAPTER:
+		status = san_evt_power_adapter(dev, event);
+		break;
+
+	case SAM_EVENT_PWR_CID_DPTF:
+		status = san_evt_power_dptf(dev, event);
+		break;
+
+	default:
+		return false;
+	}
+
+	if (status)
+		dev_err(dev, "error handling power event (cid = %x)\n",
+			event->command_id);
+
+	return true;
+}
+
+/* Worker for delayed power events; frees the work item when done. */
+static void san_evt_power_workfn(struct work_struct *work)
+{
+	struct san_event_work *ev = container_of(work, struct san_event_work, work.work);
+
+	san_evt_power(&ev->event, ev->dev);
+	kfree(ev);
+}
+
+
+/*
+ * SSAM notifier callback for power events. Events without a configured
+ * delay are handled synchronously; all others are copied (including
+ * their trailing payload) into a san_event_work and handled from a
+ * delayed work item.
+ *
+ * NOTE(review): uses GFP_KERNEL -- assumes the notifier is called from
+ * sleepable context; confirm against the controller's notifier path.
+ */
+static u32 san_evt_power_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
+{
+	struct san_data *d = to_san_data(nb, nf_bat.base);
+	struct san_event_work *work;
+	unsigned long delay = san_evt_power_delay(event->command_id);
+
+	if (delay == 0) {
+		if (san_evt_power(event, d->dev))
+			return SSAM_NOTIF_HANDLED;
+		else
+			return 0;
+	}
+
+	// allocation covers the variable-length event payload; 'event' is
+	// the last member of san_event_work
+	work = kzalloc(sizeof(struct san_event_work) + event->length, GFP_KERNEL);
+	if (!work)
+		return ssam_notifier_from_errno(-ENOMEM);
+
+	INIT_DELAYED_WORK(&work->work, san_evt_power_workfn);
+	work->dev = d->dev;
+
+	memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
+
+	schedule_delayed_work(&work->work, delay);
+	return SSAM_NOTIF_HANDLED;
+}
+
+
+/* Forward a thermal event's instance ID as a sensor trip-point notification. */
+static inline int san_evt_thermal_notify(struct device *dev, const struct ssam_event *event)
+{
+	return san_acpi_notify_sensor_trip_point(dev, event->instance_id);
+}
+
+/*
+ * Dispatch a SAM thermal event. Returns true if the command ID was
+ * recognized (notification failures are only logged), false otherwise.
+ */
+static bool san_evt_thermal(const struct ssam_event *event, struct device *dev)
+{
+	int status;
+
+	switch (event->command_id) {
+	case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT:
+		status = san_evt_thermal_notify(dev, event);
+		break;
+
+	default:
+		return false;
+	}
+
+	if (status) {
+		dev_err(dev, "error handling thermal event (cid = %x)\n",
+			event->command_id);
+	}
+
+	return true;
+}
+
+/* SSAM notifier callback for thermal events (always synchronous). */
+static u32 san_evt_thermal_nb(struct ssam_notifier_block *nb, const struct ssam_event *event)
+{
+	if (san_evt_thermal(event, to_san_data(nb, nf_tmp.base)->dev))
+		return SSAM_NOTIF_HANDLED;
+	else
+		return 0;
+}
+
+
+/*
+ * Validate an incoming RQSX (RQST/RQSG) GSB package and return a pointer
+ * to its request structure, or NULL (after logging) if it is malformed.
+ * @type is only used for log messages ("RQST" or "RQSG").
+ */
+static struct gsb_data_rqsx
+*san_validate_rqsx(struct device *dev, const char *type, struct gsb_buffer *buffer)
+{
+	struct gsb_data_rqsx *rqsx = &buffer->data.rqsx;
+
+	// buffer must at least hold the fixed RQSX header
+	// (fix: replace the magic number 8 with the header size it encodes)
+	if (buffer->len < sizeof(struct gsb_data_rqsx)) {
+		dev_err(dev, "invalid %s package (len = %d)\n",
+			type, buffer->len);
+		return NULL;
+	}
+
+	// declared payload length must match the actual buffer length
+	if (get_unaligned(&rqsx->cdl) != buffer->len - sizeof(struct gsb_data_rqsx)) {
+		dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
+			type, buffer->len, get_unaligned(&rqsx->cdl));
+		return NULL;
+	}
+
+	// payload must fit into the on-stack request buffer
+	if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
+		dev_err(dev, "payload for %s package too large (cdl = %d)\n",
+			type, get_unaligned(&rqsx->cdl));
+		return NULL;
+	}
+
+	// only transport channel 1 is supported
+	if (rqsx->tid != 0x01) {
+		dev_warn(dev, "unsupported %s package (tid = 0x%02x)\n",
+			 type, rqsx->tid);
+		return NULL;
+	}
+
+	return rqsx;
+}
+
+/*
+ * Handle an ETWL (error trace/warning log) package by logging its
+ * message. Always returns AE_OK; success is reported back through the
+ * GSB buffer status fields.
+ */
+static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *buffer)
+{
+	struct gsb_data_etwl *etwl = &buffer->data.etwl;
+
+	// buffer must at least hold the fixed ETWL header
+	// (fix: replace the magic number 3 with the header size it encodes)
+	if (buffer->len < sizeof(struct gsb_data_etwl)) {
+		dev_err(d->dev, "invalid ETWL package (len = %d)\n", buffer->len);
+		return AE_OK;
+	}
+
+	dev_err(d->dev, "ETWL(0x%02x, 0x%02x): %.*s\n",
+		etwl->etw3, etwl->etw4,
+		buffer->len - (int)sizeof(struct gsb_data_etwl), (char *)etwl->msg);
+
+	// indicate success
+	buffer->status = 0x00;
+	buffer->len = 0x00;
+
+	return AE_OK;
+}
+
+/* Fill @gsb with an error response carrying the (negated) errno code. */
+static void gsb_response_error(struct gsb_buffer *gsb, int status)
+{
+	gsb->status = 0x00;
+	gsb->len = 0x02;
+	gsb->data.out.status = (u8)(-status);	// negative errno -> positive u8
+	gsb->data.out.len = 0x00;
+}
+
+/* Fill @gsb with a success response, copying @len payload bytes from @ptr. */
+static void gsb_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
+{
+	gsb->status = 0x00;
+	gsb->len = len + 2;	// payload plus the two gsb_data_out header bytes
+	gsb->data.out.status = 0x00;
+	gsb->data.out.len = len;
+
+	if (len)
+		memcpy(&gsb->data.out.pld[0], ptr, len);
+}
+
+/*
+ * Produce a synthetic response for requests that arrive while the device
+ * is suspended (the EC cannot be reached then). Only the base-state
+ * query (TC 0x11 / CID 0x0D) gets a fabricated answer; every other
+ * request is answered with an error response.
+ */
+static acpi_status san_rqst_fixup_suspended(struct ssam_request *rqst,
+					    struct gsb_buffer *gsb)
+{
+	if (rqst->target_category == 0x11 && rqst->command_id == 0x0D) {
+		/* Base state quirk:
+		 * The base state may be queried from ACPI when the EC is still
+		 * suspended. In this case it will return '-EPERM'. This query
+		 * will only be triggered from the ACPI lid GPE interrupt, thus
+		 * we are either in laptop or studio mode (base status 0x01 or
+		 * 0x02). Furthermore, we will only get here if the device (and
+		 * EC) have been suspended.
+		 *
+		 * We now assume that the device is in laptop mode (0x01). This
+		 * has the drawback that it will wake the device when unfolding
+		 * it in studio mode, but it also allows us to avoid actively
+		 * waiting for the EC to wake up, which may incur a notable
+		 * delay.
+		 */
+
+		u8 base_state = 1;
+		gsb_response_success(gsb, &base_state, 1);
+		return AE_OK;
+	}
+
+	gsb_response_error(gsb, -ENXIO);
+	return AE_OK;
+}
+
+/*
+ * Handle an RQST package from ACPI: translate it into an SSAM request,
+ * execute it synchronously (retrying transient IO errors), and write the
+ * result back into the GSB buffer. Always returns AE_OK; errors are
+ * reported through the buffer's status fields.
+ */
+static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
+{
+	u8 rspbuf[SAN_GSB_MAX_RESPONSE];
+	struct gsb_data_rqsx *gsb_rqst;
+	struct ssam_request rqst;
+	struct ssam_response rsp;
+	int status = 0;
+	int try;
+
+	gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
+	if (!gsb_rqst)
+		return AE_OK;
+
+	rqst.target_category = gsb_rqst->tc;
+	rqst.target_id = gsb_rqst->tid;
+	rqst.command_id = gsb_rqst->cid;
+	rqst.instance_id = gsb_rqst->iid;
+	rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
+	rqst.length = get_unaligned(&gsb_rqst->cdl);
+	rqst.payload = &gsb_rqst->pld[0];
+
+	rsp.capacity = ARRAY_SIZE(rspbuf);
+	rsp.length = 0;
+	rsp.pointer = &rspbuf[0];
+
+	// handle suspended device
+	if (d->dev->power.is_suspended) {
+		dev_warn(d->dev, "rqst: device is suspended, not executing\n");
+		return san_rqst_fixup_suspended(&rqst, buffer);
+	}
+
+	// retry only transient transport errors (timeout / remote IO)
+	for (try = 0; try < SAN_RQST_RETRY; try++) {
+		if (try)
+			dev_warn(d->dev, "rqst: IO error, trying again\n");
+
+		status = san_request_sync_onstack(d->ctrl, &rqst, &rsp);
+		if (status != -ETIMEDOUT && status != -EREMOTEIO)
+			break;
+	}
+
+	if (!status) {
+		gsb_response_success(buffer, rsp.pointer, rsp.length);
+	} else {
+		dev_err(d->dev, "rqst: failed with error %d\n", status);
+		gsb_response_error(buffer, status);
+	}
+
+	return AE_OK;
+}
+
+/*
+ * Handle an RQSG package from ACPI by forwarding it to the registered
+ * RQSG handler and writing the result back into the GSB buffer. Always
+ * returns AE_OK; errors are reported through the buffer's status fields.
+ */
+static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
+{
+	struct surface_sam_san_rqsg rqsg;
+	struct gsb_data_rqsx *pkg;
+	int ret;
+
+	pkg = san_validate_rqsx(d->dev, "RQSG", buffer);
+	if (!pkg)
+		return AE_OK;
+
+	rqsg.tc  = pkg->tc;
+	rqsg.cid = pkg->cid;
+	rqsg.iid = pkg->iid;
+	rqsg.cdl = get_unaligned(&pkg->cdl);
+	rqsg.pld = &pkg->pld[0];
+
+	ret = san_call_rqsg_handler(&rqsg);
+	if (ret) {
+		dev_err(d->dev, "rqsg: failed with error %d\n", ret);
+		gsb_response_error(buffer, ret);
+	} else {
+		gsb_response_success(buffer, NULL, 0);
+	}
+
+	return AE_OK;
+}
+
+
+/*
+ * GSBUS operation-region handler: entry point for all SAN communication
+ * initiated from ACPI. Validates the raw-process access and dispatches
+ * on the command value (0x01 RQST, 0x02 ETWL, 0x03 RQSG). Unsupported
+ * accesses are logged and acknowledged with AE_OK.
+ */
+static acpi_status
+san_opreg_handler(u32 function, acpi_physical_address command,
+		  u32 bits, u64 *value64,
+		  void *opreg_context, void *region_context)
+{
+	struct san_data *d = to_san_data(opreg_context, context);
+	struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
+	int accessor_type = (0xFFFF0000 & function) >> 16;	// upper half encodes the access attribute
+
+	if (command != 0) {
+		dev_warn(d->dev, "unsupported command: 0x%02llx\n", command);
+		return AE_OK;
+	}
+
+	if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
+		dev_err(d->dev, "invalid access type: 0x%02x\n", accessor_type);
+		return AE_OK;
+	}
+
+	// buffer must have at least contain the command-value
+	if (buffer->len == 0) {
+		dev_err(d->dev, "request-package too small\n");
+		return AE_OK;
+	}
+
+	switch (buffer->data.in.cv) {
+	case 0x01: return san_rqst(d, buffer);
+	case 0x02: return san_etwl(d, buffer);
+	case 0x03: return san_rqsg(d, buffer);
+	}
+
+	dev_warn(d->dev, "unsupported SAN0 request (cv: 0x%02x)\n", buffer->data.in.cv);
+	return AE_OK;
+}
+
+/*
+ * Register the battery and thermal event notifiers with the SSAM
+ * controller. On partial failure, the already-registered notifier is
+ * unregistered again before returning the error.
+ */
+static int san_events_register(struct platform_device *pdev)
+{
+	struct san_data *d = platform_get_drvdata(pdev);
+	int status;
+
+	d->nf_bat.base.priority = 1;
+	d->nf_bat.base.fn = san_evt_power_nb;
+	d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
+	d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
+	d->nf_bat.event.id.instance = 0;
+	d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
+
+	d->nf_tmp.base.priority = 1;
+	d->nf_tmp.base.fn = san_evt_thermal_nb;
+	d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
+	d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
+	d->nf_tmp.event.id.instance = 0;
+	d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
+
+	status = ssam_notifier_register(d->ctrl, &d->nf_bat);
+	if (status)
+		return status;
+
+	status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
+	if (status)
+		ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+
+	return status;
+}
+
+/* Unregister both event notifiers registered in san_events_register(). */
+static void san_events_unregister(struct platform_device *pdev)
+{
+	struct san_data *d = platform_get_drvdata(pdev);
+
+	ssam_notifier_unregister(d->ctrl, &d->nf_bat);
+	ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
+}
+
+
+/*
+ * Create device links from the ACPI consumer devices listed in @cons to
+ * the SAN platform device, recording them in @out for later removal.
+ *
+ * Entries whose ACPI path cannot be resolved are skipped unless marked
+ * required. Returns 0 on success or a negative errno; on failure, all
+ * stateless links created so far are removed and the table is freed.
+ */
+static int san_consumers_link(struct platform_device *pdev,
+			      const struct san_acpi_consumer *cons,
+			      struct san_consumers *out)
+{
+	const struct san_acpi_consumer *con;
+	struct san_consumer_link *links, *link;
+	struct acpi_device *adev;
+	acpi_handle handle;
+	u32 max_links = 0;
+	int status;
+
+	if (!cons)
+		return 0;
+
+	// count links
+	for (con = cons; con->path; ++con)
+		max_links += 1;
+
+	// allocate
+	links = kcalloc(max_links, sizeof(struct san_consumer_link), GFP_KERNEL);
+	if (!links)
+		return -ENOMEM;
+
+	// fix: take the element pointer only after the NULL check above
+	link = &links[0];
+
+	// create links
+	for (con = cons; con->path; ++con) {
+		status = acpi_get_handle(NULL, con->path, &handle);
+		if (status) {
+			// optional consumers that are simply absent are skipped
+			if (con->required || status != AE_NOT_FOUND) {
+				status = -ENXIO;
+				goto cleanup;
+			} else {
+				continue;
+			}
+		}
+
+		status = acpi_bus_get_device(handle, &adev);
+		if (status)
+			goto cleanup;
+
+		link->link = device_link_add(&adev->dev, &pdev->dev, con->flags);
+		if (!(link->link)) {
+			status = -EFAULT;
+			goto cleanup;
+		}
+		link->properties = con;
+
+		link += 1;
+	}
+
+	out->num = link - links;
+	out->links = links;
+
+	return 0;
+
+cleanup:
+	// roll back: only stateless links need explicit removal
+	for (link = link - 1; link >= links; --link) {
+		if (link->properties->flags & DL_FLAG_STATELESS)
+			device_link_del(link->link);
+	}
+
+	kfree(links);	// fix: the link table was leaked on this path
+	return status;
+}
+
+/* Remove all previously created consumer links and free the bookkeeping. */
+static void san_consumers_unlink(struct san_consumers *consumers)
+{
+	struct san_consumer_link *link;
+	u32 i;
+
+	if (!consumers)
+		return;
+
+	for (i = 0; i < consumers->num; ++i) {
+		link = &consumers->links[i];
+
+		// only stateless links need explicit removal
+		if (link->properties->flags & DL_FLAG_STATELESS)
+			device_link_del(link->link);
+	}
+
+	kfree(consumers->links);
+	consumers->links = NULL;
+	consumers->num = 0;
+}
+
+/*
+ * Probe the SAN platform device: bind the SSAM controller, create
+ * consumer device links, install the GSBUS operation-region handler and
+ * the SAM event notifiers, then publish the RQSG interface and release
+ * devices that were waiting on SAN via the ACPI dependency list.
+ */
+static int surface_sam_san_probe(struct platform_device *pdev)
+{
+	const struct san_acpi_consumer *cons;
+	acpi_handle san = ACPI_HANDLE(&pdev->dev);	// _SAN device node
+	struct ssam_controller *ctrl;
+	struct san_data *data;
+	int status;
+
+	// -ENXIO means the controller is not up yet, so defer probing
+	status = ssam_client_bind(&pdev->dev, &ctrl);
+	if (status)
+		return status == -ENXIO ? -EPROBE_DEFER : status;
+
+	data = kzalloc(sizeof(struct san_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = &pdev->dev;
+	data->ctrl = ctrl;
+
+	cons = acpi_device_get_match_data(&pdev->dev);
+	status = san_consumers_link(pdev, cons, &data->consumers);
+	if (status)
+		goto err_consumers;
+
+	platform_set_drvdata(pdev, data);
+
+	status = acpi_install_address_space_handler(san,
+			ACPI_ADR_SPACE_GSBUS,
+			&san_opreg_handler,
+			NULL, &data->context);
+
+	if (ACPI_FAILURE(status)) {
+		status = -ENODEV;
+		goto err_install_handler;
+	}
+
+	status = san_events_register(pdev);
+	if (status)
+		goto err_enable_events;
+
+	// publish ourselves as the (single) SAN device for the RQSG interface
+	mutex_lock(&rqsg_if.lock);
+	if (!rqsg_if.san_dev)
+		rqsg_if.san_dev = &pdev->dev;
+	else
+		status = -EBUSY;
+	mutex_unlock(&rqsg_if.lock);
+
+	if (status)
+		goto err_install_dev;
+
+	acpi_walk_dep_device_list(san);
+	return 0;
+
+err_install_dev:
+	san_events_unregister(pdev);
+err_enable_events:
+	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
+err_install_handler:
+	// fix: clear drvdata on the platform device, not the ACPI handle
+	platform_set_drvdata(pdev, NULL);
+	san_consumers_unlink(&data->consumers);
+err_consumers:
+	kfree(data);
+	return status;
+}
+
+/*
+ * Remove callback: withdraw the RQSG interface, remove the opregion
+ * handler and event notifiers, flush pending delayed event work, and
+ * free all resources.
+ *
+ * NOTE(review): 'status' is an acpi_status returned as int; it is only
+ * ever AE_OK (0) here, so callers see success -- consider returning a
+ * plain 0 for clarity.
+ */
+static int surface_sam_san_remove(struct platform_device *pdev)
+{
+	struct san_data *data = platform_get_drvdata(pdev);
+	acpi_handle san = ACPI_HANDLE(&pdev->dev);	// _SAN device node
+	acpi_status status = AE_OK;
+
+	mutex_lock(&rqsg_if.lock);
+	rqsg_if.san_dev = NULL;
+	mutex_unlock(&rqsg_if.lock);
+
+	acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler);
+	san_events_unregister(pdev);
+
+	/*
+	 * We have unregistered our event sources. Now we need to ensure that
+	 * all delayed works they may have spawned are run to completion.
+	 */
+	flush_scheduled_work();
+
+	san_consumers_unlink(&data->consumers);
+	kfree(data);
+
+	platform_set_drvdata(pdev, NULL);
+	return status;
+}
+
+
+/* Consumer devices for MSHW0091: RTC, AC adapter and batteries. */
+static const struct san_acpi_consumer san_mshw0091_consumers[] = {
+	{ "\\_SB.SRTC", true,  DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
+	{ "\\ADP1",     true,  DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
+	{ "\\_SB.BAT1", true,  DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
+	{ "\\_SB.BAT2", false, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS },
+	{ },
+};
+
+// driver_data carries the consumer table for the matched device
+static const struct acpi_device_id surface_sam_san_match[] = {
+	{ "MSHW0091", (unsigned long) san_mshw0091_consumers },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, surface_sam_san_match);
+
+static struct platform_driver surface_sam_san = {
+	.probe = surface_sam_san_probe,
+	.remove = surface_sam_san_remove,
+	.driver = {
+		.name = "surface_sam_san",
+		.acpi_match_table = surface_sam_san_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+module_platform_driver(surface_sam_san);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>")
+MODULE_DESCRIPTION("Surface ACPI Notify Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_san.h b/drivers/misc/surface_sam/clients/surface_sam_san.h
new file mode 100644
index 0000000000000..3408dde964b3c
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_san.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Interface for Surface ACPI/Notify (SAN).
+ *
+ * The SAN is the main interface between the Surface Serial Hub (SSH) and the
+ * Surface/System Aggregator Module (SAM). It allows requests to be translated
+ * from ACPI to SSH/SAM. It also interfaces with the discrete GPU hot-plug
+ * driver.
+ */
+
+#ifndef _SURFACE_SAM_SAN_H
+#define _SURFACE_SAM_SAN_H
+
+#include <linux/types.h>
+
+
+struct surface_sam_san_rqsg {
+ u8 tc; // target category
+ u8 cid; // command ID
+ u8 iid; // instance ID
+ u16 cdl; // command data length (length of payload)
+ u8 *pld; // pointer to payload of length cdl
+};
+
+typedef int (*surface_sam_san_rqsg_handler_fn)(struct surface_sam_san_rqsg *rqsg, void *data);
+
+int surface_sam_san_consumer_register(struct device *consumer, u32 flags);
+int surface_sam_san_set_rqsg_handler(surface_sam_san_rqsg_handler_fn fn, void *data);
+
+#endif /* _SURFACE_SAM_SAN_H */
diff --git a/drivers/misc/surface_sam/clients/surface_sam_sid_perfmode.c b/drivers/misc/surface_sam/clients/surface_sam_sid_perfmode.c
new file mode 100644
index 0000000000000..24907e15c47ae
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_sid_perfmode.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface Performance Mode Driver.
+ * Allows changing cooling capabilities based on user preference.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+#define SID_PARAM_PERM 0644
+
+enum sam_perf_mode {
+ SAM_PERF_MODE_NORMAL = 1,
+ SAM_PERF_MODE_BATTERY = 2,
+ SAM_PERF_MODE_PERF1 = 3,
+ SAM_PERF_MODE_PERF2 = 4,
+
+ __SAM_PERF_MODE__START = 1,
+ __SAM_PERF_MODE__END = 4,
+};
+
+enum sid_param_perf_mode {
+ SID_PARAM_PERF_MODE_AS_IS = 0,
+ SID_PARAM_PERF_MODE_NORMAL = SAM_PERF_MODE_NORMAL,
+ SID_PARAM_PERF_MODE_BATTERY = SAM_PERF_MODE_BATTERY,
+ SID_PARAM_PERF_MODE_PERF1 = SAM_PERF_MODE_PERF1,
+ SID_PARAM_PERF_MODE_PERF2 = SAM_PERF_MODE_PERF2,
+
+ __SID_PARAM_PERF_MODE__START = 0,
+ __SID_PARAM_PERF_MODE__END = 4,
+};
+
+
+struct ssam_perf_info {
+ __le32 mode;
+ __le16 unknown1;
+ __le16 unknown2;
+} __packed;
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .target_id = 0x01,
+ .command_id = 0x02,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
+ .target_category = SSAM_SSH_TC_TMP,
+ .target_id = 0x01,
+ .command_id = 0x03,
+ .instance_id = 0x00,
+});
+
+static int ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, u32 mode)
+{
+ __le32 mode_le = cpu_to_le32(mode);
+
+ if (mode < __SAM_PERF_MODE__START || mode > __SAM_PERF_MODE__END)
+ return -EINVAL;
+
+ return __ssam_tmp_perf_mode_set(ctrl, &mode_le);
+}
+
+
+static int param_perf_mode_set(const char *val, const struct kernel_param *kp)
+{
+ int perf_mode;
+ int status;
+
+ status = kstrtoint(val, 0, &perf_mode);
+ if (status)
+ return status;
+
+ if (perf_mode < __SID_PARAM_PERF_MODE__START || perf_mode > __SID_PARAM_PERF_MODE__END)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops param_perf_mode_ops = {
+ .set = param_perf_mode_set,
+ .get = param_get_int,
+};
+
+static int param_perf_mode_init = SID_PARAM_PERF_MODE_AS_IS;
+static int param_perf_mode_exit = SID_PARAM_PERF_MODE_AS_IS;
+
+module_param_cb(perf_mode_init, &param_perf_mode_ops, &param_perf_mode_init, SID_PARAM_PERM);
+module_param_cb(perf_mode_exit, &param_perf_mode_ops, &param_perf_mode_exit, SID_PARAM_PERM);
+
+MODULE_PARM_DESC(perf_mode_init, "Performance-mode to be set on module initialization");
+MODULE_PARM_DESC(perf_mode_exit, "Performance-mode to be set on module exit");
+
+
+static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr, char *data)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+ struct ssam_perf_info info;
+ int status;
+
+ status = ssam_tmp_perf_mode_get(sdev->ctrl, &info);
+ if (status) {
+ dev_err(dev, "failed to get current performance mode: %d\n", status);
+ return -EIO;
+ }
+
+ return sprintf(data, "%d\n", le32_to_cpu(info.mode));
+}
+
+static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct ssam_device *sdev = to_ssam_device(dev);
+ int perf_mode;
+ int status;
+
+ status = kstrtoint(data, 0, &perf_mode);
+ if (status)
+ return status;
+
+ status = ssam_tmp_perf_mode_set(sdev->ctrl, perf_mode);
+ if (status)
+ return status;
+
+ // TODO: Should we notify ACPI here?
+ //
+ // There is a _DSM call described as
+ // WSID._DSM: Notify DPTF on Slider State change
+ // which calls
+ // ODV3 = ToInteger (Arg3)
+ // Notify(IETM, 0x88)
+ // IETM is an INT3400 Intel Dynamic Power Performance Management
+ // device, part of the DPTF framework. From the corresponding
+ // kernel driver, it looks like event 0x88 is being ignored. Also
+	// it is currently unknown what the consequences of setting ODV3
+ // are.
+
+ return count;
+}
+
+static const DEVICE_ATTR_RW(perf_mode);
+
+
+static int surface_sam_sid_perfmode_probe(struct ssam_device *sdev)
+{
+ int status;
+
+ // set initial perf_mode
+ if (param_perf_mode_init != SID_PARAM_PERF_MODE_AS_IS) {
+ status = ssam_tmp_perf_mode_set(sdev->ctrl, param_perf_mode_init);
+ if (status)
+ return status;
+ }
+
+ // register perf_mode attribute
+ status = sysfs_create_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
+ if (status)
+ ssam_tmp_perf_mode_set(sdev->ctrl, param_perf_mode_exit);
+
+ return status;
+}
+
+static void surface_sam_sid_perfmode_remove(struct ssam_device *sdev)
+{
+ sysfs_remove_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
+ ssam_tmp_perf_mode_set(sdev->ctrl, param_perf_mode_exit);
+}
+
+
+static const struct ssam_device_id ssam_perfmode_match[] = {
+ { SSAM_DEVICE(TMP, 0x01, 0x00, 0x02) },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, ssam_perfmode_match);
+
+static struct ssam_device_driver surface_sam_sid_perfmode = {
+ .probe = surface_sam_sid_perfmode_probe,
+ .remove = surface_sam_sid_perfmode_remove,
+ .match_table = ssam_perfmode_match,
+ .driver = {
+ .name = "surface_sam_sid_perfmode",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_sam_sid_perfmode);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Performance Mode Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_sid_power.c b/drivers/misc/surface_sam/clients/surface_sam_sid_power.c
new file mode 100644
index 0000000000000..d6559a251fa8b
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_sid_power.c
@@ -0,0 +1,1112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface SID Battery/AC Driver.
+ * Provides support for the battery and AC on 7th generation Surface devices.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+// TODO: check BIX/BST for unknown/unsupported 0xffffffff entries
+// TODO: DPTF (/SAN notifications)?
+// TODO: other properties?
+
+
+static unsigned int cache_time = 1000;
+module_param(cache_time, uint, 0644);
+MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
+
+#define SPWR_AC_BAT_UPDATE_DELAY msecs_to_jiffies(5000)
+
+
+/*
+ * SAM Interface.
+ */
+
+#define SAM_EVENT_PWR_CID_BIX 0x15
+#define SAM_EVENT_PWR_CID_BST 0x16
+#define SAM_EVENT_PWR_CID_ADAPTER 0x17
+
+#define SAM_BATTERY_STA_OK 0x0f
+#define SAM_BATTERY_STA_PRESENT 0x10
+
+#define SAM_BATTERY_STATE_DISCHARGING 0x01
+#define SAM_BATTERY_STATE_CHARGING 0x02
+#define SAM_BATTERY_STATE_CRITICAL 0x04
+
+#define SAM_BATTERY_POWER_UNIT_MA 1
+
+
+/* Equivalent to data returned in ACPI _BIX method */
+struct spwr_bix {
+ u8 revision;
+ __le32 power_unit;
+ __le32 design_cap;
+ __le32 last_full_charge_cap;
+ __le32 technology;
+ __le32 design_voltage;
+ __le32 design_cap_warn;
+ __le32 design_cap_low;
+ __le32 cycle_count;
+ __le32 measurement_accuracy;
+ __le32 max_sampling_time;
+ __le32 min_sampling_time;
+ __le32 max_avg_interval;
+ __le32 min_avg_interval;
+ __le32 bat_cap_granularity_1;
+ __le32 bat_cap_granularity_2;
+ u8 model[21];
+ u8 serial[11];
+ u8 type[5];
+ u8 oem_info[21];
+} __packed;
+
+/* Equivalent to data returned in ACPI _BST method */
+struct spwr_bst {
+ __le32 state;
+ __le32 present_rate;
+ __le32 remaining_cap;
+ __le32 present_voltage;
+} __packed;
+
+/* DPTF event payload */
+struct spwr_event_dptf {
+ __le32 pmax;
+ __le32 _1; /* currently unknown */
+ __le32 _2; /* currently unknown */
+} __packed;
+
+
+/* Get battery status (_STA) */
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x01,
+});
+
+/* Get battery static information (_BIX) */
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bix, struct spwr_bix, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x02,
+});
+
+/* Get battery dynamic information (_BST) */
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bst, struct spwr_bst, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x03,
+});
+
+/* Set battery trip point (_BTP) */
+static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_btp, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x04,
+});
+
+/* Get platform power source for battery (DPTF PSRC) */
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psrc, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x0d,
+});
+
+/* Get maximum platform power for battery (DPTF PMAX) */
+__always_unused
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_pmax, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x0b,
+});
+
+/* Get adapter rating (DPTF ARTG) */
+__always_unused
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_artg, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x0f,
+});
+
+/* Unknown (DPTF PSOC) */
+__always_unused
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psoc, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x0c,
+});
+
+/* Unknown (DPTF CHGI/ INT3403 SPPC) */
+__always_unused
+static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_chgi, __le32, {
+ .target_category = SSAM_SSH_TC_BAT,
+ .command_id = 0x0e,
+});
+
+
+/*
+ * Common Power-Subsystem Interface.
+ */
+
+struct spwr_psy_properties {
+ const char *name;
+ struct ssam_event_registry registry;
+};
+
+struct spwr_battery_device {
+ struct ssam_device *sdev;
+
+ char name[32];
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+
+ struct delayed_work update_work;
+
+ struct ssam_event_notifier notif;
+
+ struct mutex lock;
+ unsigned long timestamp;
+
+ __le32 sta;
+ struct spwr_bix bix;
+ struct spwr_bst bst;
+ u32 alarm;
+};
+
+struct spwr_ac_device {
+ struct ssam_device *sdev;
+
+ char name[32];
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+
+ struct ssam_event_notifier notif;
+
+ struct mutex lock;
+
+ __le32 state;
+};
+
+static enum power_supply_property spwr_ac_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property spwr_battery_props_chg[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+static enum power_supply_property spwr_battery_props_eng[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+
+static int spwr_battery_register(struct spwr_battery_device *bat,
+ struct ssam_device *sdev,
+ struct ssam_event_registry registry);
+
+static void spwr_battery_unregister(struct spwr_battery_device *bat);
+
+
+static inline bool spwr_battery_present(struct spwr_battery_device *bat)
+{
+ return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
+}
+
+
+static inline int spwr_battery_load_sta(struct spwr_battery_device *bat)
+{
+ return ssam_bat_get_sta(bat->sdev, &bat->sta);
+}
+
+static inline int spwr_battery_load_bix(struct spwr_battery_device *bat)
+{
+ if (!spwr_battery_present(bat))
+ return 0;
+
+ return ssam_bat_get_bix(bat->sdev, &bat->bix);
+}
+
+static inline int spwr_battery_load_bst(struct spwr_battery_device *bat)
+{
+ if (!spwr_battery_present(bat))
+ return 0;
+
+ return ssam_bat_get_bst(bat->sdev, &bat->bst);
+}
+
+
+static inline int spwr_battery_set_alarm_unlocked(
+ struct spwr_battery_device *bat, u32 value)
+{
+ __le32 alarm = cpu_to_le32(value);
+
+ bat->alarm = value;
+ return ssam_bat_set_btp(bat->sdev, &alarm);
+}
+
+static inline int spwr_battery_set_alarm(struct spwr_battery_device *bat,
+ u32 value)
+{
+ int status;
+
+ mutex_lock(&bat->lock);
+ status = spwr_battery_set_alarm_unlocked(bat, value);
+ mutex_unlock(&bat->lock);
+
+ return status;
+}
+
+static inline int spwr_battery_update_bst_unlocked(
+ struct spwr_battery_device *bat, bool cached)
+{
+ unsigned long cache_deadline;
+ int status;
+
+ cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
+
+ if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
+ return 0;
+
+ status = spwr_battery_load_sta(bat);
+ if (status)
+ return status;
+
+ status = spwr_battery_load_bst(bat);
+ if (status)
+ return status;
+
+ bat->timestamp = jiffies;
+ return 0;
+}
+
+static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
+{
+ int status;
+
+ mutex_lock(&bat->lock);
+ status = spwr_battery_update_bst_unlocked(bat, cached);
+ mutex_unlock(&bat->lock);
+
+ return status;
+}
+
+static inline int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
+{
+ int status;
+
+ status = spwr_battery_load_sta(bat);
+ if (status)
+ return status;
+
+ status = spwr_battery_load_bix(bat);
+ if (status)
+ return status;
+
+ status = spwr_battery_load_bst(bat);
+ if (status)
+ return status;
+
+ bat->timestamp = jiffies;
+ return 0;
+}
+
+static int spwr_battery_update_bix(struct spwr_battery_device *bat)
+{
+ int status;
+
+ mutex_lock(&bat->lock);
+ status = spwr_battery_update_bix_unlocked(bat);
+ mutex_unlock(&bat->lock);
+
+ return status;
+}
+
+static inline int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
+{
+ int status;
+ u32 old = ac->state;
+
+ status = ssam_bat_get_psrc(ac->sdev, &ac->state);
+ if (status < 0)
+ return status;
+
+ return old != ac->state;
+}
+
+static int spwr_ac_update(struct spwr_ac_device *ac)
+{
+ int status;
+
+ mutex_lock(&ac->lock);
+ status = spwr_ac_update_unlocked(ac);
+ mutex_unlock(&ac->lock);
+
+ return status;
+}
+
+
+static int spwr_battery_recheck(struct spwr_battery_device *bat)
+{
+ bool present = spwr_battery_present(bat);
+ u32 unit = get_unaligned_le32(&bat->bix.power_unit);
+ int status;
+
+ status = spwr_battery_update_bix(bat);
+ if (status)
+ return status;
+
+ // if battery has been attached, (re-)initialize alarm
+ if (!present && spwr_battery_present(bat)) {
+ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
+ status = spwr_battery_set_alarm(bat, cap_warn);
+ if (status)
+ return status;
+ }
+
+ // if the unit has changed, re-add the battery
+ if (unit != get_unaligned_le32(&bat->bix.power_unit)) {
+ spwr_battery_unregister(bat);
+ status = spwr_battery_register(bat, bat->sdev,
+ bat->notif.event.reg);
+ }
+
+ return status;
+}
+
+
+static inline int spwr_notify_bix(struct spwr_battery_device *bat)
+{
+ int status;
+
+ status = spwr_battery_recheck(bat);
+ if (!status)
+ power_supply_changed(bat->psy);
+
+ return status;
+}
+
+static inline int spwr_notify_bst(struct spwr_battery_device *bat)
+{
+ int status;
+
+ status = spwr_battery_update_bst(bat, false);
+ if (!status)
+ power_supply_changed(bat->psy);
+
+ return status;
+}
+
+static inline int spwr_notify_adapter_bat(struct spwr_battery_device *bat)
+{
+ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
+
+ /*
+ * Handle battery update quirk:
+ * When the battery is fully charged and the adapter is plugged in or
+ * removed, the EC does not send a separate event for the state
+ * (charging/discharging) change. Furthermore it may take some time until
+ * the state is updated on the battery. Schedule an update to solve this.
+ */
+
+ if (remaining_cap >= last_full_cap)
+ schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
+
+ return 0;
+}
+
+static inline int spwr_notify_adapter_ac(struct spwr_ac_device *ac)
+{
+ int status;
+
+ status = spwr_ac_update(ac);
+ if (status > 0)
+ power_supply_changed(ac->psy);
+
+ return status >= 0 ? 0 : status;
+}
+
+static u32 spwr_notify_bat(struct ssam_notifier_block *nb,
+ const struct ssam_event *event)
+{
+ struct spwr_battery_device *bat;
+ int status;
+
+ bat = container_of(nb, struct spwr_battery_device, notif.base);
+
+ dev_dbg(&bat->sdev->dev, "power event (cid = 0x%02x, iid = %d, tid = %d)\n",
+ event->command_id, event->instance_id, event->target_id);
+
+ // handled here, needs to be handled for all targets/instances
+ if (event->command_id == SAM_EVENT_PWR_CID_ADAPTER) {
+ status = spwr_notify_adapter_bat(bat);
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+ }
+
+ if (!ssam_event_matches_device(bat->sdev->uid, event))
+ return 0;
+
+ switch (event->command_id) {
+ case SAM_EVENT_PWR_CID_BIX:
+ status = spwr_notify_bix(bat);
+ break;
+
+ case SAM_EVENT_PWR_CID_BST:
+ status = spwr_notify_bst(bat);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+}
+
+static u32 spwr_notify_ac(struct ssam_notifier_block *nb,
+ const struct ssam_event *event)
+{
+ struct spwr_ac_device *ac;
+ int status;
+
+ ac = container_of(nb, struct spwr_ac_device, notif.base);
+
+ dev_dbg(&ac->sdev->dev, "power event (cid = 0x%02x, iid = %d, tid = %d)\n",
+ event->command_id, event->instance_id, event->target_id);
+
+ if (event->target_category != ac->sdev->uid.category)
+ return 0;
+
+ /*
+ * Allow events of all targets/instances here. Global adapter status
+ * seems to be handled via target=1 and instance=1, but events are
+ * reported on all targets/instances in use.
+ *
+ * While it should be enough to just listen on 1/1, listen everywhere to
+ * make sure we don't miss anything.
+ */
+
+ switch (event->command_id) {
+ case SAM_EVENT_PWR_CID_ADAPTER:
+ status = spwr_notify_adapter_ac(ac);
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+
+ default:
+ return 0;
+ }
+}
+
+static void spwr_battery_update_bst_workfn(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct spwr_battery_device *bat;
+ int status;
+
+ bat = container_of(dwork, struct spwr_battery_device, update_work);
+
+ status = spwr_battery_update_bst(bat, false);
+ if (!status)
+ power_supply_changed(bat->psy);
+
+ if (status) {
+ dev_err(&bat->sdev->dev, "failed to update battery state: %d\n",
+ status);
+ }
+}
+
+
+static inline int spwr_battery_prop_status(struct spwr_battery_device *bat)
+{
+ u32 state = get_unaligned_le32(&bat->bst.state);
+ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
+ u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
+
+ if (state & SAM_BATTERY_STATE_DISCHARGING)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (state & SAM_BATTERY_STATE_CHARGING)
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ if (last_full_cap == remaining_cap)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ if (present_rate == 0)
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+}
+
+static inline int spwr_battery_prop_technology(struct spwr_battery_device *bat)
+{
+ if (!strcasecmp("NiCd", bat->bix.type))
+ return POWER_SUPPLY_TECHNOLOGY_NiCd;
+
+ if (!strcasecmp("NiMH", bat->bix.type))
+ return POWER_SUPPLY_TECHNOLOGY_NiMH;
+
+ if (!strcasecmp("LION", bat->bix.type))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+
+ if (!strncasecmp("LI-ION", bat->bix.type, 6))
+ return POWER_SUPPLY_TECHNOLOGY_LION;
+
+ if (!strcasecmp("LiP", bat->bix.type))
+ return POWER_SUPPLY_TECHNOLOGY_LIPO;
+
+ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+}
+
+static inline int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
+{
+ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
+
+ if (remaining_cap && last_full_cap)
+ return remaining_cap * 100 / last_full_cap;
+ else
+ return 0;
+}
+
+static inline int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
+{
+ u32 state = get_unaligned_le32(&bat->bst.state);
+ u32 last_full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
+
+ if (state & SAM_BATTERY_STATE_CRITICAL)
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+
+ if (remaining_cap >= last_full_cap)
+ return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+
+ if (remaining_cap <= bat->alarm)
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+}
+
+static int spwr_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
+ int status;
+
+ mutex_lock(&ac->lock);
+
+ status = spwr_ac_update_unlocked(ac);
+ if (status)
+ goto out;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = le32_to_cpu(ac->state) == 1;
+ break;
+
+ default:
+ status = -EINVAL;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ac->lock);
+ return status;
+}
+
+static int spwr_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
+ int status;
+
+ mutex_lock(&bat->lock);
+
+ status = spwr_battery_update_bst_unlocked(bat, true);
+ if (status)
+ goto out;
+
+ // abort if battery is not present
+ if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
+ status = -ENODEV;
+ goto out;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = spwr_battery_prop_status(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = spwr_battery_present(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = spwr_battery_prop_technology(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = get_unaligned_le32(&bat->bix.cycle_count);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = get_unaligned_le32(&bat->bix.design_voltage)
+ * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_unaligned_le32(&bat->bst.present_voltage)
+ * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ val->intval = get_unaligned_le32(&bat->bst.present_rate) * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ val->intval = get_unaligned_le32(&bat->bix.design_cap) * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ val->intval = get_unaligned_le32(&bat->bix.last_full_charge_cap)
+ * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ val->intval = get_unaligned_le32(&bat->bst.remaining_cap)
+ * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = spwr_battery_prop_capacity(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ val->intval = spwr_battery_prop_capacity_level(bat);
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = bat->bix.model;
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = bat->bix.oem_info;
+ break;
+
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = bat->bix.serial;
+ break;
+
+ default:
+ status = -EINVAL;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bat->lock);
+ return status;
+}
+
+
+static ssize_t spwr_battery_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
+
+ return sprintf(buf, "%d\n", bat->alarm * 1000);
+}
+
+static ssize_t spwr_battery_alarm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct power_supply *psy = dev_get_drvdata(dev);
+ struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
+ unsigned long value;
+ int status;
+
+ status = kstrtoul(buf, 0, &value);
+ if (status)
+ return status;
+
+ if (!spwr_battery_present(bat))
+ return -ENODEV;
+
+ status = spwr_battery_set_alarm(bat, value / 1000);
+ if (status)
+ return status;
+
+ return count;
+}
+
+static const struct device_attribute alarm_attr = {
+ .attr = {.name = "alarm", .mode = 0644},
+ .show = spwr_battery_alarm_show,
+ .store = spwr_battery_alarm_store,
+};
+
+
+static void spwr_ac_set_name(struct spwr_ac_device *ac, const char *name)
+{
+ strncpy(ac->name, name, ARRAY_SIZE(ac->name) - 1);
+}
+
+static int spwr_ac_register(struct spwr_ac_device *ac,
+ struct ssam_device *sdev,
+ struct ssam_event_registry registry)
+{
+ struct power_supply_config psy_cfg = {};
+ __le32 sta;
+ int status;
+
+ // make sure the device is there and functioning properly
+ status = ssam_bat_get_sta(sdev, &sta);
+ if (status)
+ return status;
+
+ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
+ return -ENODEV;
+
+ psy_cfg.drv_data = ac;
+
+ ac->sdev = sdev;
+ mutex_init(&ac->lock);
+
+ ac->psy_desc.name = ac->name;
+ ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
+ ac->psy_desc.properties = spwr_ac_props;
+ ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
+ ac->psy_desc.get_property = spwr_ac_get_property;
+
+ ac->psy = power_supply_register(&ac->sdev->dev, &ac->psy_desc, &psy_cfg);
+ if (IS_ERR(ac->psy)) {
+ status = PTR_ERR(ac->psy);
+ goto err_psy;
+ }
+
+ ac->notif.base.priority = 1;
+ ac->notif.base.fn = spwr_notify_ac;
+ ac->notif.event.reg = registry;
+ ac->notif.event.id.target_category = sdev->uid.category;
+ ac->notif.event.id.instance = 0;
+ ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_notifier_register(sdev->ctrl, &ac->notif);
+ if (status)
+ goto err_notif;
+
+ return 0;
+
+err_notif:
+ power_supply_unregister(ac->psy);
+err_psy:
+ mutex_destroy(&ac->lock);
+ return status;
+}
+
+static int spwr_ac_unregister(struct spwr_ac_device *ac)
+{
+ ssam_notifier_unregister(ac->sdev->ctrl, &ac->notif);
+ power_supply_unregister(ac->psy);
+ mutex_destroy(&ac->lock);
+ return 0;
+}
+
+static void spwr_battery_set_name(struct spwr_battery_device *bat,
+ const char *name)
+{
+ strncpy(bat->name, name, ARRAY_SIZE(bat->name) - 1);
+}
+
+static int spwr_battery_register(struct spwr_battery_device *bat,
+ struct ssam_device *sdev,
+ struct ssam_event_registry registry)
+{
+ struct power_supply_config psy_cfg = {};
+ __le32 sta;
+ int status;
+
+ bat->sdev = sdev;
+
+ // make sure the device is there and functioning properly
+ status = ssam_bat_get_sta(sdev, &sta);
+ if (status)
+ return status;
+
+ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
+ return -ENODEV;
+
+ status = spwr_battery_update_bix_unlocked(bat);
+ if (status)
+ return status;
+
+ if (spwr_battery_present(bat)) {
+ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
+ status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
+ if (status)
+ return status;
+ }
+
+ bat->psy_desc.name = bat->name;
+ bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+
+ if (get_unaligned_le32(&bat->bix.power_unit) == SAM_BATTERY_POWER_UNIT_MA) {
+ bat->psy_desc.properties = spwr_battery_props_chg;
+ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
+ } else {
+ bat->psy_desc.properties = spwr_battery_props_eng;
+ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
+ }
+
+ bat->psy_desc.get_property = spwr_battery_get_property;
+
+ mutex_init(&bat->lock);
+ psy_cfg.drv_data = bat;
+
+ INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
+
+ bat->psy = power_supply_register(&bat->sdev->dev, &bat->psy_desc, &psy_cfg);
+ if (IS_ERR(bat->psy)) {
+ status = PTR_ERR(bat->psy);
+ goto err_psy;
+ }
+
+ bat->notif.base.priority = 1;
+ bat->notif.base.fn = spwr_notify_bat;
+ bat->notif.event.reg = registry;
+ bat->notif.event.id.target_category = sdev->uid.category;
+ bat->notif.event.id.instance = 0;
+ bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
+
+ status = ssam_notifier_register(sdev->ctrl, &bat->notif);
+ if (status)
+ goto err_notif;
+
+ status = device_create_file(&bat->psy->dev, &alarm_attr);
+ if (status)
+ goto err_file;
+
+ return 0;
+
+err_file:
+ ssam_notifier_unregister(sdev->ctrl, &bat->notif);
+err_notif:
+ power_supply_unregister(bat->psy);
+err_psy:
+ mutex_destroy(&bat->lock);
+ return status;
+}
+
+static void spwr_battery_unregister(struct spwr_battery_device *bat)
+{
+ ssam_notifier_unregister(bat->sdev->ctrl, &bat->notif);
+ cancel_delayed_work_sync(&bat->update_work);
+ device_remove_file(&bat->psy->dev, &alarm_attr);
+ power_supply_unregister(bat->psy);
+ mutex_destroy(&bat->lock);
+}
+
+
+/*
+ * Battery Driver.
+ */
+
+static int surface_sam_sid_battery_resume(struct device *dev)
+{
+ struct spwr_battery_device *bat;
+
+ // TODO: run this on workqueue
+
+ bat = dev_get_drvdata(dev);
+ return spwr_battery_recheck(bat);
+}
+SIMPLE_DEV_PM_OPS(surface_sam_sid_battery_pm,
+ NULL, surface_sam_sid_battery_resume);
+
+static int surface_sam_sid_battery_probe(struct ssam_device *sdev)
+{
+ const struct spwr_psy_properties *p;
+ struct spwr_battery_device *bat;
+ int status;
+
+ p = ssam_device_get_match_data(sdev);
+ if (!p)
+ return -ENODEV;
+
+ bat = devm_kzalloc(&sdev->dev, sizeof(*bat), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ spwr_battery_set_name(bat, p->name);
+ ssam_device_set_drvdata(sdev, bat);
+
+ status = spwr_battery_register(bat, sdev, p->registry);
+ if (status)
+ ssam_device_set_drvdata(sdev, NULL);
+
+ return status;
+}
+
+static void surface_sam_sid_battery_remove(struct ssam_device *sdev)
+{
+ struct spwr_battery_device *bat;
+
+ bat = ssam_device_get_drvdata(sdev);
+ spwr_battery_unregister(bat);
+
+ ssam_device_set_drvdata(sdev, NULL);
+}
+
+static const struct spwr_psy_properties spwr_psy_props_bat1 = {
+ .name = "BAT1",
+ .registry = SSAM_EVENT_REGISTRY_SAM,
+};
+
+static const struct spwr_psy_properties spwr_psy_props_bat2_sb3 = {
+ .name = "BAT2",
+ .registry = SSAM_EVENT_REGISTRY_REG,
+};
+
+static const struct ssam_device_id surface_sam_sid_battery_match[] = {
+ { SSAM_DEVICE(BAT, 0x01, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1 },
+ { SSAM_DEVICE(BAT, 0x02, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_sam_sid_battery_match);
+
+static struct ssam_device_driver surface_sam_sid_battery = {
+ .probe = surface_sam_sid_battery_probe,
+ .remove = surface_sam_sid_battery_remove,
+ .match_table = surface_sam_sid_battery_match,
+ .driver = {
+ .name = "surface_sam_sid_battery",
+ .pm = &surface_sam_sid_battery_pm,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/*
+ * AC Driver.
+ */
+
+// TODO: check/update on resume, call power_supply_changed?
+
+static int surface_sam_sid_ac_probe(struct ssam_device *sdev)
+{
+ const struct spwr_psy_properties *p;
+ struct spwr_ac_device *ac;
+ int status;
+
+ p = ssam_device_get_match_data(sdev);
+ if (!p)
+ return -ENODEV;
+
+ ac = devm_kzalloc(&sdev->dev, sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ spwr_ac_set_name(ac, p->name);
+ ssam_device_set_drvdata(sdev, ac);
+
+ status = spwr_ac_register(ac, sdev, p->registry);
+ if (status)
+ ssam_device_set_drvdata(sdev, NULL);
+
+ return status;
+}
+
+static void surface_sam_sid_ac_remove(struct ssam_device *sdev)
+{
+ struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
+
+ spwr_ac_unregister(ac);
+ ssam_device_set_drvdata(sdev, NULL);
+}
+
+static const struct spwr_psy_properties spwr_psy_props_adp1 = {
+ .name = "ADP1",
+ .registry = SSAM_EVENT_REGISTRY_SAM,
+};
+
+static const struct ssam_device_id surface_sam_sid_ac_match[] = {
+ { SSAM_DEVICE(BAT, 0x01, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_sam_sid_ac_match);
+
+static struct ssam_device_driver surface_sam_sid_ac = {
+ .probe = surface_sam_sid_ac_probe,
+ .remove = surface_sam_sid_ac_remove,
+ .match_table = surface_sam_sid_ac_match,
+ .driver = {
+ .name = "surface_sam_sid_ac",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+static int __init surface_sam_sid_power_init(void)
+{
+ int status;
+
+ status = ssam_device_driver_register(&surface_sam_sid_battery);
+ if (status)
+ return status;
+
+ status = ssam_device_driver_register(&surface_sam_sid_ac);
+ if (status) {
+ ssam_device_driver_unregister(&surface_sam_sid_battery);
+ return status;
+ }
+
+ return 0;
+}
+
+static void __exit surface_sam_sid_power_exit(void)
+{
+ ssam_device_driver_unregister(&surface_sam_sid_battery);
+ ssam_device_driver_unregister(&surface_sam_sid_ac);
+}
+
+module_init(surface_sam_sid_power_init);
+module_exit(surface_sam_sid_power_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Battery/AC Driver for 7th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_sid_vhf.c b/drivers/misc/surface_sam/clients/surface_sam_sid_vhf.c
new file mode 100644
index 0000000000000..baf8b53e7f990
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_sid_vhf.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microsoft Surface HID (VHF) driver for HID input events via SAM.
+ * Used for keyboard input events on the 7th generation Surface Laptops.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+#define SID_VHF_INPUT_NAME "Microsoft Surface HID"
+
+#define SAM_EVENT_SID_VHF_TC 0x15
+
+#define VHF_HID_STARTED 0
+
+struct sid_vhf_properties {
+ struct ssam_event_registry registry;
+};
+
+struct sid_vhf {
+ struct ssam_device *sdev;
+ struct ssam_event_notifier notif;
+
+ struct hid_device *hid;
+ unsigned long state;
+};
+
+
+static int sid_vhf_hid_start(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+ return 0;
+}
+
+static void sid_vhf_hid_stop(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+}
+
+static int sid_vhf_hid_open(struct hid_device *hid)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
+
+ hid_dbg(hid, "%s\n", __func__);
+
+ set_bit(VHF_HID_STARTED, &vhf->state);
+ return 0;
+}
+
+static void sid_vhf_hid_close(struct hid_device *hid)
+{
+
+ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
+
+ hid_dbg(hid, "%s\n", __func__);
+
+ clear_bit(VHF_HID_STARTED, &vhf->state);
+}
+
+struct surface_sam_sid_vhf_meta_rqst {
+ u8 id;
+ u32 offset;
+ u32 length; // buffer limit on send, length of data received on receive
+ u8 end; // 0x01 if end was reached
+} __packed;
+
+struct vhf_device_metadata_info {
+ u8 len;
+ u8 _2;
+ u8 _3;
+ u8 _4;
+ u8 _5;
+ u8 _6;
+ u8 _7;
+ u16 hid_len; // hid descriptor length
+} __packed;
+
+struct vhf_device_metadata {
+ u32 len;
+ u16 vendor_id;
+ u16 product_id;
+ u8 _1[24];
+} __packed;
+
+union vhf_buffer_data {
+ struct vhf_device_metadata_info info;
+ u8 pld[0x76];
+ struct vhf_device_metadata meta;
+};
+
+struct surface_sam_sid_vhf_meta_resp {
+ struct surface_sam_sid_vhf_meta_rqst rqst;
+ union vhf_buffer_data data;
+} __packed;
+
+
+static int vhf_get_metadata(struct ssam_device *sdev, struct vhf_device_metadata *meta)
+{
+ struct surface_sam_sid_vhf_meta_resp data = {};
+ struct ssam_request rqst;
+ struct ssam_response rsp;
+ int status;
+
+ data.rqst.id = 2;
+ data.rqst.offset = 0;
+ data.rqst.length = 0x76;
+ data.rqst.end = 0;
+
+ rqst.target_category = sdev->uid.category;
+ rqst.target_id = sdev->uid.target;
+ rqst.command_id = 0x04;
+ rqst.instance_id = sdev->uid.instance;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
+ rqst.payload = (u8 *)&data.rqst;
+
+ rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
+ rsp.length = 0;
+ rsp.pointer = (u8 *)&data;
+
+ status = ssam_request_sync(sdev->ctrl, &rqst, &rsp);
+ if (status)
+ return status;
+
+ *meta = data.data.meta;
+
+ return 0;
+}
+
+static int vhf_get_hid_descriptor(struct ssam_device *sdev, u8 **desc, int *size)
+{
+	struct surface_sam_sid_vhf_meta_resp data = {};
+	struct ssam_request rqst;
+	struct ssam_response rsp;
+	int status, len;
+	u8 *buf;
+
+	data.rqst.id = 0;
+	data.rqst.offset = 0;
+	data.rqst.length = 0x76;
+	data.rqst.end = 0;
+
+	rqst.target_category = sdev->uid.category;
+	rqst.target_id = sdev->uid.target;
+	rqst.command_id = 0x04;
+	rqst.instance_id = sdev->uid.instance;
+	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+	rqst.length = sizeof(struct surface_sam_sid_vhf_meta_rqst);
+	rqst.payload = (u8 *)&data.rqst;
+
+	rsp.capacity = sizeof(struct surface_sam_sid_vhf_meta_resp);
+	rsp.length = 0;
+	rsp.pointer = (u8 *)&data;
+
+	// first fetch 00 to get the total length
+	status = ssam_request_sync(sdev->ctrl, &rqst, &rsp);
+	if (status)
+		return status;
+
+	len = data.data.info.hid_len;
+
+	buf = kzalloc(len, GFP_KERNEL);	// allocate a buffer for the descriptor
+	if (!buf)
+		return -ENOMEM;
+	// then, iterate and write into buffer, copying out bytes
+	data.rqst.id = 1;
+	data.rqst.offset = 0;
+	data.rqst.length = 0x76;
+	data.rqst.end = 0;
+
+	while (!data.rqst.end && data.rqst.offset < len) {
+		status = ssam_request_sync(sdev->ctrl, &rqst, &rsp);
+		if (status) {
+			kfree(buf);
+			return status;
+		}
+		memcpy(buf + data.rqst.offset, data.data.pld, data.rqst.length);	/* NOTE(review): length is EC-supplied; assumes it never exceeds len - offset — confirm */
+
+		data.rqst.offset += data.rqst.length;
+	}
+
+	*desc = buf;
+	*size = len;
+
+	return 0;
+}
+
+static int sid_vhf_hid_parse(struct hid_device *hid)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
+ int ret = 0, size;
+ u8 *buf;
+
+ ret = vhf_get_hid_descriptor(vhf->sdev, &buf, &size);
+ if (ret != 0) {
+ hid_err(hid, "Failed to read HID descriptor from device: %d\n", ret);
+ return -EIO;
+ }
+ hid_dbg(hid, "HID descriptor of device:");
+ print_hex_dump_debug("descriptor:", DUMP_PREFIX_OFFSET, 16, 1, buf, size, false);
+
+ ret = hid_parse_report(hid, buf, size);
+ kfree(buf);
+ return ret;
+
+}
+
+static int sid_vhf_hid_raw_request(struct hid_device *hid, unsigned char
+		reportnum, u8 *buf, size_t len, unsigned char rtype, int
+		reqtype)
+{
+	struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent);
+	struct ssam_request rqst;
+	struct ssam_response rsp;
+	int status;
+	u8 cid;
+
+	hid_dbg(hid, "%s: reportnum=%#04x rtype=%i reqtype=%i\n", __func__, reportnum, rtype, reqtype);
+	print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+
+	// Byte 0 is the report number. Report data starts at byte 1.
+	buf[0] = reportnum;
+
+	switch (rtype) {
+	case HID_OUTPUT_REPORT:
+		cid = 0x01;
+		break;
+	case HID_FEATURE_REPORT:
+		switch (reqtype) {
+		case HID_REQ_GET_REPORT:
+			// The EC doesn't respond to GET FEATURE for these touchpad reports
+			// we immediately discard to avoid waiting for a timeout.
+			if (reportnum == 6 || reportnum == 7 || reportnum == 8 || reportnum == 9 || reportnum == 0x0b) {
+				hid_dbg(hid, "%s: skipping get feature report for 0x%02x\n", __func__, reportnum);
+				return 0;
+			}
+
+			cid = 0x02;
+			break;
+		case HID_REQ_SET_REPORT:
+			cid = 0x03;
+			break;
+		default:
+			hid_err(hid, "%s: unknown req type 0x%02x\n", __func__, reqtype);
+			return -EIO;
+		}
+		break;
+	default:
+		hid_err(hid, "%s: unknown report type 0x%02x\n", __func__, rtype);
+		return -EIO;
+	}
+
+	rqst.target_category = vhf->sdev->uid.category;
+	rqst.target_id = vhf->sdev->uid.target;
+	rqst.instance_id = vhf->sdev->uid.instance;
+	rqst.command_id = cid;
+	rqst.flags = reqtype == HID_REQ_GET_REPORT ? SSAM_REQUEST_HAS_RESPONSE : 0;
+	rqst.length = reqtype == HID_REQ_GET_REPORT ? 1 : len;
+	rqst.payload = buf;
+
+	rsp.capacity = len;
+	rsp.length = 0;
+	rsp.pointer = buf;
+
+	hid_dbg(hid, "%s: sending to cid=%#04x snc=%#04x\n", __func__, cid, HID_REQ_GET_REPORT == reqtype);
+
+	status = ssam_request_sync(vhf->sdev->ctrl, &rqst, &rsp);
+	hid_dbg(hid, "%s: status %i\n", __func__, status);
+
+	if (status)
+		return status;
+
+	if (rsp.length > 0)
+		print_hex_dump_debug("response:", DUMP_PREFIX_OFFSET, 16, 1, rsp.pointer, rsp.length, false);
+
+	return rsp.length;
+}
+
+static struct hid_ll_driver sid_vhf_hid_ll_driver = {
+ .start = sid_vhf_hid_start,
+ .stop = sid_vhf_hid_stop,
+ .open = sid_vhf_hid_open,
+ .close = sid_vhf_hid_close,
+ .parse = sid_vhf_hid_parse,
+ .raw_request = sid_vhf_hid_raw_request,
+};
+
+
+static struct hid_device *sid_vhf_create_hid_device(struct ssam_device *sdev, struct vhf_device_metadata *meta)
+{
+	struct hid_device *hid;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return hid;
+
+	hid->dev.parent = &sdev->dev;
+
+	hid->bus = BUS_VIRTUAL;
+	hid->vendor = meta->vendor_id;
+	hid->product = meta->product_id;
+
+	hid->ll_driver = &sid_vhf_hid_ll_driver;
+
+	snprintf(hid->name, sizeof(hid->name), "%s", SID_VHF_INPUT_NAME);
+
+	return hid;
+}
+
+static u32 sid_vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
+{
+ struct sid_vhf *vhf = container_of(nb, struct sid_vhf, notif.base);
+ int status;
+
+ if (!ssam_event_matches_device(vhf->sdev->uid, event))
+ return 0;
+
+ if (event->command_id != 0x00 && event->command_id != 0x03 && event->command_id != 0x04)
+ return 0;
+
+ // skip if HID hasn't started yet
+ if (!test_bit(VHF_HID_STARTED, &vhf->state))
+ return SSAM_NOTIF_HANDLED;
+
+ status = hid_input_report(vhf->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+}
+
+
+#ifdef CONFIG_PM
+
+static int surface_sam_sid_vhf_suspend(struct device *dev)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(dev);
+
+ if (vhf->hid->driver && vhf->hid->driver->suspend)
+ return vhf->hid->driver->suspend(vhf->hid, PMSG_SUSPEND);
+
+ return 0;
+}
+
+static int surface_sam_sid_vhf_resume(struct device *dev)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(dev);
+
+ if (vhf->hid->driver && vhf->hid->driver->resume)
+ return vhf->hid->driver->resume(vhf->hid);
+
+ return 0;
+}
+
+static int surface_sam_sid_vhf_freeze(struct device *dev)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(dev);
+
+ if (vhf->hid->driver && vhf->hid->driver->suspend)
+ return vhf->hid->driver->suspend(vhf->hid, PMSG_FREEZE);
+
+ return 0;
+}
+
+static int surface_sam_sid_vhf_poweroff(struct device *dev)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(dev);
+
+ if (vhf->hid->driver && vhf->hid->driver->suspend)
+ return vhf->hid->driver->suspend(vhf->hid, PMSG_HIBERNATE);
+
+ return 0;
+}
+
+static int surface_sam_sid_vhf_restore(struct device *dev)
+{
+ struct sid_vhf *vhf = dev_get_drvdata(dev);
+
+ if (vhf->hid->driver && vhf->hid->driver->reset_resume)
+ return vhf->hid->driver->reset_resume(vhf->hid);
+
+ return 0;
+}
+
+static const struct dev_pm_ops surface_sam_sid_vhf_pm_ops = {
+	.freeze = surface_sam_sid_vhf_freeze,
+	.thaw = surface_sam_sid_vhf_resume,
+	.suspend = surface_sam_sid_vhf_suspend,
+	.resume = surface_sam_sid_vhf_resume,
+	.poweroff = surface_sam_sid_vhf_poweroff,
+	.restore = surface_sam_sid_vhf_restore,
+};
+
+#else /* CONFIG_PM */
+
+static const struct dev_pm_ops surface_sam_sid_vhf_pm_ops = { };
+
+#endif /* CONFIG_PM */
+
+
+static int surface_sam_sid_vhf_probe(struct ssam_device *sdev)
+{
+ const struct sid_vhf_properties *p;
+ struct sid_vhf *vhf;
+ struct vhf_device_metadata meta = {};
+ struct hid_device *hid;
+ int status;
+
+ p = ssam_device_get_match_data(sdev);
+ if (!p)
+ return -ENODEV;
+
+ vhf = kzalloc(sizeof(*vhf), GFP_KERNEL);
+ if (!vhf)
+ return -ENOMEM;
+
+ status = vhf_get_metadata(sdev, &meta);
+ if (status)
+ goto err_create_hid;
+
+ hid = sid_vhf_create_hid_device(sdev, &meta);
+ if (IS_ERR(hid)) {
+ status = PTR_ERR(hid);
+ goto err_create_hid;
+ }
+
+ vhf->sdev = sdev;
+ vhf->hid = hid;
+
+ vhf->notif.base.priority = 1;
+ vhf->notif.base.fn = sid_vhf_event_handler;
+ vhf->notif.event.reg = p->registry;
+ vhf->notif.event.id.target_category = sdev->uid.category;
+ vhf->notif.event.id.instance = sdev->uid.instance;
+ vhf->notif.event.flags = 0;
+
+ ssam_device_set_drvdata(sdev, vhf);
+
+ status = ssam_notifier_register(sdev->ctrl, &vhf->notif);
+ if (status)
+ goto err_notif;
+
+ status = hid_add_device(hid);
+ if (status)
+ goto err_add_hid;
+
+ return 0;
+
+err_add_hid:
+ ssam_notifier_unregister(sdev->ctrl, &vhf->notif);
+err_notif:
+ hid_destroy_device(hid);
+ ssam_device_set_drvdata(sdev, NULL);
+err_create_hid:
+ kfree(vhf);
+ return status;
+}
+
+static void surface_sam_sid_vhf_remove(struct ssam_device *sdev)
+{
+ struct sid_vhf *vhf = ssam_device_get_drvdata(sdev);
+
+ ssam_notifier_unregister(sdev->ctrl, &vhf->notif);
+ hid_destroy_device(vhf->hid);
+ kfree(vhf);
+
+ ssam_device_set_drvdata(sdev, NULL);
+}
+
+static const struct sid_vhf_properties sid_vhf_default_props = {
+ .registry = SSAM_EVENT_REGISTRY_REG,
+};
+
+static const struct ssam_device_id surface_sam_sid_vhf_match[] = {
+ {
+ SSAM_DEVICE(HID, SSAM_ANY_TID, SSAM_ANY_IID, 0x00),
+ .driver_data = (unsigned long)&sid_vhf_default_props
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(ssam, surface_sam_sid_vhf_match);
+
+static struct ssam_device_driver surface_sam_sid_vhf = {
+ .probe = surface_sam_sid_vhf_probe,
+ .remove = surface_sam_sid_vhf_remove,
+ .match_table = surface_sam_sid_vhf_match,
+ .driver = {
+ .name = "surface_sam_sid_vhf",
+ .pm = &surface_sam_sid_vhf_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_ssam_device_driver(surface_sam_sid_vhf);
+
+MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
+MODULE_DESCRIPTION("Driver for HID devices connected via Surface SAM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/clients/surface_sam_vhf.c b/drivers/misc/surface_sam/clients/surface_sam_vhf.c
new file mode 100644
index 0000000000000..3b7f08f7d028d
--- /dev/null
+++ b/drivers/misc/surface_sam/clients/surface_sam_vhf.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtual HID Framework (VHF) driver for input events via SAM.
+ * Used for keyboard input events on the Surface Laptops.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+#define USB_VENDOR_ID_MICROSOFT 0x045e
+#define USB_DEVICE_ID_MS_VHF 0xf001
+
+#define VHF_INPUT_NAME "Microsoft Virtual HID Framework Device"
+
+
+struct vhf_drvdata {
+ struct platform_device *dev;
+ struct ssam_controller *ctrl;
+
+ struct ssam_event_notifier notif;
+
+ struct hid_device *hid;
+};
+
+
+/*
+ * These report descriptors have been extracted from a Surface Book 2.
+ * They seem to be similar enough to be usable on the Surface Laptop.
+ */
+static const u8 vhf_hid_desc[] = {
+ // keyboard descriptor (event command ID 0x03)
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x0A, /* Report Count (10), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x29, 0x91, /* Usage Maximum (KB LANG2), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x81, 0x00, /* Input, */
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x0A, 0xC0, 0x02, /* Usage (02C0h), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x1A, 0xC1, 0x02, /* Usage Minimum (02C1h), */
+ 0x2A, 0xC6, 0x02, /* Usage Maximum (02C6h), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x08, /* Usage Page (LED), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x03, /* Usage Maximum (03h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0x91, 0x01, /* Output (Constant), */
+ 0xC0, /* End Collection, */
+
+ // media key descriptor (event command ID 0x04)
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x01, /* Usage (Consumer Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+ 0x19, 0x00, /* Usage Minimum (00h), */
+ 0x2A, 0xFF, 0x03, /* Usage Maximum (03FFh), */
+ 0x81, 0x00, /* Input, */
+ 0xC0, /* End Collection, */
+};
+
+
+static int vhf_hid_start(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+ return 0;
+}
+
+static void vhf_hid_stop(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+}
+
+static int vhf_hid_open(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+ return 0;
+}
+
+static void vhf_hid_close(struct hid_device *hid)
+{
+ hid_dbg(hid, "%s\n", __func__);
+}
+
+static int vhf_hid_parse(struct hid_device *hid)
+{
+ return hid_parse_report(hid, (u8 *)vhf_hid_desc, ARRAY_SIZE(vhf_hid_desc));
+}
+
+static int vhf_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+ u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ hid_dbg(hid, "%s\n", __func__);
+ return 0;
+}
+
+static int vhf_hid_output_report(struct hid_device *hid, u8 *buf, size_t len)
+{
+ hid_dbg(hid, "%s\n", __func__);
+ print_hex_dump_debug("report:", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+
+ return len;
+}
+
+static struct hid_ll_driver vhf_hid_ll_driver = {
+ .start = vhf_hid_start,
+ .stop = vhf_hid_stop,
+ .open = vhf_hid_open,
+ .close = vhf_hid_close,
+ .parse = vhf_hid_parse,
+ .raw_request = vhf_hid_raw_request,
+ .output_report = vhf_hid_output_report,
+};
+
+
+static struct hid_device *vhf_create_hid_device(struct platform_device *pdev)
+{
+	struct hid_device *hid;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return hid;
+
+	hid->dev.parent = &pdev->dev;
+
+	hid->bus = BUS_VIRTUAL;
+	hid->vendor = USB_VENDOR_ID_MICROSOFT;
+	hid->product = USB_DEVICE_ID_MS_VHF;
+
+	hid->ll_driver = &vhf_hid_ll_driver;
+
+	snprintf(hid->name, sizeof(hid->name), "%s", VHF_INPUT_NAME);
+
+	return hid;
+}
+
+static u32 vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event)
+{
+ struct vhf_drvdata *drvdata = container_of(nb, struct vhf_drvdata, notif.base);
+ int status;
+
+ if (event->target_category != 0x08)
+ return 0;
+
+ if (event->command_id == 0x03 || event->command_id == 0x04) {
+ status = hid_input_report(drvdata->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 1);
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int surface_sam_vhf_suspend(struct device *dev)
+{
+ struct vhf_drvdata *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
+
+ return 0;
+}
+
+static int surface_sam_vhf_resume(struct device *dev)
+{
+ struct vhf_drvdata *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->resume)
+ return d->hid->driver->resume(d->hid);
+
+ return 0;
+}
+
+static int surface_sam_vhf_freeze(struct device *dev)
+{
+ struct vhf_drvdata *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
+
+ return 0;
+}
+
+static int surface_sam_vhf_poweroff(struct device *dev)
+{
+ struct vhf_drvdata *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->suspend)
+ return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
+
+ return 0;
+}
+
+static int surface_sam_vhf_restore(struct device *dev)
+{
+ struct vhf_drvdata *d = dev_get_drvdata(dev);
+
+ if (d->hid->driver && d->hid->driver->reset_resume)
+ return d->hid->driver->reset_resume(d->hid);
+
+ return 0;
+}
+
+static const struct dev_pm_ops surface_sam_vhf_pm_ops = {
+	.freeze = surface_sam_vhf_freeze,
+	.thaw = surface_sam_vhf_resume,
+	.suspend = surface_sam_vhf_suspend,
+	.resume = surface_sam_vhf_resume,
+	.poweroff = surface_sam_vhf_poweroff,
+	.restore = surface_sam_vhf_restore,
+};
+
+#else /* CONFIG_PM */
+
+static const struct dev_pm_ops surface_sam_vhf_pm_ops = { };
+
+#endif /* CONFIG_PM */
+
+
+static int surface_sam_vhf_probe(struct platform_device *pdev)
+{
+ struct ssam_controller *ctrl;
+ struct vhf_drvdata *drvdata;
+ struct hid_device *hid;
+ int status;
+
+ // add device link to EC
+ status = ssam_client_bind(&pdev->dev, &ctrl);
+ if (status)
+ return status == -ENXIO ? -EPROBE_DEFER : status;
+
+ drvdata = kzalloc(sizeof(struct vhf_drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ hid = vhf_create_hid_device(pdev);
+ if (IS_ERR(hid)) {
+ status = PTR_ERR(hid);
+ goto err_probe_hid;
+ }
+
+ status = hid_add_device(hid);
+ if (status)
+ goto err_add_hid;
+
+ drvdata->dev = pdev;
+ drvdata->ctrl = ctrl;
+ drvdata->hid = hid;
+
+ drvdata->notif.base.priority = 1;
+ drvdata->notif.base.fn = vhf_event_handler;
+ drvdata->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
+ drvdata->notif.event.id.target_category = SSAM_SSH_TC_KBD;
+ drvdata->notif.event.id.instance = 0;
+ drvdata->notif.event.flags = 0;
+
+ platform_set_drvdata(pdev, drvdata);
+
+ status = ssam_notifier_register(ctrl, &drvdata->notif);
+ if (status)
+ goto err_add_hid;
+
+ return 0;
+
+err_add_hid:
+ hid_destroy_device(hid);
+ platform_set_drvdata(pdev, NULL);
+err_probe_hid:
+ kfree(drvdata);
+ return status;
+}
+
+static int surface_sam_vhf_remove(struct platform_device *pdev)
+{
+ struct vhf_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ ssam_notifier_unregister(drvdata->ctrl, &drvdata->notif);
+ hid_destroy_device(drvdata->hid);
+ kfree(drvdata);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+
+static const struct acpi_device_id surface_sam_vhf_match[] = {
+ { "MSHW0096" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, surface_sam_vhf_match);
+
+static struct platform_driver surface_sam_vhf = {
+ .probe = surface_sam_vhf_probe,
+ .remove = surface_sam_vhf_remove,
+ .driver = {
+ .name = "surface_sam_vhf",
+ .acpi_match_table = surface_sam_vhf_match,
+ .pm = &surface_sam_vhf_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(surface_sam_vhf);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Virtual HID Framework Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/controller.c b/drivers/misc/surface_sam/controller.c
new file mode 100644
index 0000000000000..5cbb54a2d54f2
--- /dev/null
+++ b/drivers/misc/surface_sam/controller.c
@@ -0,0 +1,2384 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "controller.h"
+#include "ssh_msgb.h"
+#include "ssh_protocol.h"
+#include "ssh_request_layer.h"
+
+#include "ssam_trace.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * ssh_seq_reset() - Reset/initialize sequence ID counter.
+ * @c: The counter to reset.
+ */
+static inline void ssh_seq_reset(struct ssh_seq_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_seq_next() - Get next sequence ID.
+ * @c: The counter providing the sequence IDs.
+ */
+static inline u8 ssh_seq_next(struct ssh_seq_counter *c)
+{
+ u8 old = READ_ONCE(c->value);
+ u8 new = old + 1;
+ u8 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = old + 1;
+ }
+
+ return old;
+}
+
+/**
+ * ssh_rqid_reset() - Reset/initialize request ID counter.
+ * @c: The counter to reset.
+ */
+static inline void ssh_rqid_reset(struct ssh_rqid_counter *c)
+{
+ WRITE_ONCE(c->value, 0);
+}
+
+/**
+ * ssh_rqid_next() - Get next request ID.
+ * @c: The counter providing the request IDs.
+ */
+static inline u16 ssh_rqid_next(struct ssh_rqid_counter *c)
+{
+ u16 old = READ_ONCE(c->value);
+ u16 new = ssh_rqid_next_valid(old);
+ u16 ret;
+
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
+ old = ret;
+ new = ssh_rqid_next_valid(old);
+ }
+
+ return old;
+}
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+/*
+ * The notifier system is based on linux/notifier.h, specifically the SRCU
+ * implementation. The difference to that is, that some bits of the notifier
+ * call return value can be tracked across multiple calls. This is done so that
+ * handling of events can be tracked and a warning can be issued in case an
+ * event goes unhandled. The idea of that warning is that it should help discover
+ * and identify new/currently unimplemented features.
+ */
+
+/**
+ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
+ * @nh: The notifier head for which the notifier callbacks should be called.
+ * @event: The event data provided to the callbacks.
+ *
+ * Call all registered notifier callbacks in order of their priority until
+ * either no notifier is left or a notifier returns a value with the
+ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
+ * ssam_notifier_from_errno() on any non-zero error value.
+ *
+ * Returns the notifier status value, which contains the notifier status bits
+ * (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a potential error
+ * value returned from the last executed notifier callback. Use
+ * ssam_notifier_to_errno() to convert this value to the original error value.
+ */
+static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
+{
+ struct ssam_notifier_block *nb, *next_nb;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&nh->srcu);
+
+ nb = rcu_dereference_raw(nh->head);
+ while (nb) {
+ next_nb = rcu_dereference_raw(nb->next);
+
+ ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nb, event);
+ if (ret & SSAM_NOTIF_STOP)
+ break;
+
+ nb = next_nb;
+ }
+
+ srcu_read_unlock(&nh->srcu, idx);
+ return ret;
+}
+
+/**
+ * __ssam_nfblk_insert() - Insert a new notifier block into the given notifier
+ * list.
+ * @nh: The notifier head into which the block should be inserted.
+ * @nb: The notifier block to add.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert and/or remove calls.
+ *
+ * Return: Returns zero on success, %-EINVAL if the notifier block has already
+ * been registered.
+ */
+static int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block **link = &nh->head;
+
+ while ((*link) != NULL) {
+ if (unlikely((*link) == nb)) {
+ WARN(1, "double register detected");
+ return -EINVAL;
+ }
+
+ if (nb->priority > (*link)->priority)
+ break;
+
+ link = &((*link)->next);
+ }
+
+ nb->next = *link;
+ rcu_assign_pointer(*link, nb);
+
+ return 0;
+}
+
+/**
+ * __ssam_nfblk_find_link() - Find a notifier block link on the given list.
+ * @nh: The notifier head on which the search should be conducted.
+ * @nb: The notifier block to search for.
+ *
+ * Note: This function must be synchronized by the caller with respect to
+ * insert and/or remove calls.
+ *
+ * Returns a pointer to the pointer pointing to the given notifier block from
+ * the previous node in the list, or %NULL if the given notifier block is not
+ * contained in the notifier list.
+ */
+static struct ssam_notifier_block **__ssam_nfblk_find_link(
+ struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block **link = &nh->head;
+
+ while ((*link) != NULL) {
+ if ((*link) == nb)
+ return link;
+
+ link = &((*link)->next);
+ }
+
+ return NULL;
+}
+
+/**
+ * __ssam_nfblk_erase() - Erase a notifier block link in the given notifier
+ * list.
+ * @link: The link to be erased.
+ *
+ * Note: This function must be synchronized by the caller with respect to other
+ * insert and/or remove/erase/find calls. The caller _must_ ensure SRCU
+ * synchronization by calling `synchronize_srcu(&nh->srcu)` after leaving the
+ * critical section, to ensure that the removed notifier block is not in use any
+ * more.
+ */
+static void __ssam_nfblk_erase(struct ssam_notifier_block **link)
+{
+ rcu_assign_pointer(*link, (*link)->next);
+}
+
+
+/**
+ * __ssam_nfblk_remove() - Remove a notifier block from the given notifier list.
+ * @nh: The notifier head from which the block should be removed.
+ * @nb: The notifier block to remove.
+ *
+ * Note: This function must be synchronized by the caller with respect to
+ * other insert and/or remove calls. On success, the caller *must* ensure SRCU
+ * synchronization by calling synchronize_srcu() with ``nh->srcu`` after
+ * leaving the critical section, to ensure that the removed notifier block is
+ * not in use any more.
+ *
+ * Return: Returns zero on success, %-ENOENT if the specified notifier block
+ * could not be found on the notifier list.
+ */
+static int __ssam_nfblk_remove(struct ssam_nf_head *nh,
+ struct ssam_notifier_block *nb)
+{
+ struct ssam_notifier_block **link;
+
+ link = __ssam_nfblk_find_link(nh, nb);
+ if (!link)
+ return -ENOENT;
+
+ __ssam_nfblk_erase(link);
+ return 0;
+}
+
+/**
+ * ssam_nf_head_init() - Initialize the given notifier head.
+ * @nh: The notifier head to initialize.
+ */
+static int ssam_nf_head_init(struct ssam_nf_head *nh)
+{
+ int status;
+
+ status = init_srcu_struct(&nh->srcu);
+ if (status)
+ return status;
+
+ nh->head = NULL;
+ return 0;
+}
+
+/**
+ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
+ * @nh: The notifier head to deinitialize.
+ */
+static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
+{
+ cleanup_srcu_struct(&nh->srcu);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_nf_refcount_key - Key used for event activation reference
+ * counting.
+ * @reg: The registry via which the event is enabled/disabled.
+ * @id: The ID uniquely describing the event.
+ */
+struct ssam_nf_refcount_key {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+};
+
+/**
+ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
+ * activations.
+ * @node: The node of this entry in the rb-tree.
+ * @key: The key of the event.
+ * @refcount: The reference-count of the event.
+ * @flags: The flags used when enabling the event.
+ */
+struct ssam_nf_refcount_entry {
+ struct rb_node node;
+ struct ssam_nf_refcount_key key;
+ int refcount;
+ u8 flags;
+};
+
+
+/**
+ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Increments the reference-/activation-count associated with the specified
+ * event type/ID, allocating a new entry for this event ID if necessary. A
+ * newly allocated entry will have a refcount of one.
+ *
+ * Return: Returns the refcount entry on success. Returns ``ERR_PTR(-ENOSPC)``
+ * if there have already been %INT_MAX events of the specified ID and type
+ * registered, or ``ERR_PTR(-ENOMEM)`` if the entry could not be allocated.
+ */
+static struct ssam_nf_refcount_entry *ssam_nf_refcount_inc(
+ struct ssam_nf *nf, struct ssam_event_registry reg,
+ struct ssam_event_id id)
+{
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_refcount_key key;
+ struct rb_node **link = &nf->refcount.rb_node;
+ struct rb_node *parent = NULL;
+ int cmp;
+
+ key.reg = reg;
+ key.id = id;
+
+ while (*link) {
+ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
+ parent = *link;
+
+ cmp = memcmp(&key, &entry->key, sizeof(key));
+ if (cmp < 0) {
+ link = &(*link)->rb_left;
+ } else if (cmp > 0) {
+ link = &(*link)->rb_right;
+ } else if (entry->refcount < INT_MAX) {
+ entry->refcount++;
+ return entry;
+ } else {
+ return ERR_PTR(-ENOSPC);
+ }
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->key = key;
+ entry->refcount = 1;
+
+ rb_link_node(&entry->node, parent, link);
+ rb_insert_color(&entry->node, &nf->refcount);
+
+ return entry;
+}
+
+/**
+ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
+ * event.
+ * @nf: The notifier system reference.
+ * @reg: The registry used to enable/disable the event.
+ * @id: The event ID.
+ *
+ * Decrements the reference-/activation-count of the specified event,
+ * returning its entry. If the returned entry has a refcount of zero, the
+ * caller is responsible for freeing it using kfree().
+ *
+ * Return: Returns the refcount entry on success or %NULL if the entry has not
+ * been found.
+ */
+static struct ssam_nf_refcount_entry *ssam_nf_refcount_dec(
+		struct ssam_nf *nf, struct ssam_event_registry reg,
+		struct ssam_event_id id)
+{
+	struct ssam_nf_refcount_entry *entry;
+	struct ssam_nf_refcount_key key;
+	struct rb_node *node = nf->refcount.rb_node;
+	int cmp;
+
+	key.reg = reg;
+	key.id = id;
+
+	/* Standard rbtree lookup, same ordering as ssam_nf_refcount_inc(). */
+	while (node) {
+		entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
+
+		cmp = memcmp(&key, &entry->key, sizeof(key));
+		if (cmp < 0) {
+			node = node->rb_left;
+		} else if (cmp > 0) {
+			node = node->rb_right;
+		} else {
+			entry->refcount--;
+			/*
+			 * Unlink the entry once unused; freeing it is the
+			 * caller's responsibility (see kernel-doc above).
+			 */
+			if (entry->refcount == 0)
+				rb_erase(&entry->node, &nf->refcount);
+
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * ssam_nf_refcount_empty() - Test if the notification system has any
+ * enabled/active events.
+ * @nf: The notification system.
+ */
+static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
+{
+	/* NOTE(review): assumes the caller serializes refcount-tree access — confirm. */
+	return RB_EMPTY_ROOT(&nf->refcount);
+}
+
+/**
+ * ssam_nf_call() - Call notification callbacks for the provided event.
+ * @nf: The notifier system
+ * @dev: The associated device, only used for logging.
+ * @rqid: The request ID of the event.
+ * @event: The event provided to the callbacks.
+ *
+ * Execute registered callbacks in order of their priority until either no
+ * callback is left or a callback returned a value with the %SSAM_NOTIF_STOP
+ * bit set. Note that this bit is set automatically when converting non-zero
+ * error values via ssam_notifier_from_errno() to notifier values.
+ *
+ * Also note that any callback that could handle an event should return a value
+ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
+ * unhandled/ignored. In case no registered callback could handle an event,
+ * this function will emit a warning.
+ *
+ * In case a callback failed, this function will emit an error message.
+ */
+static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
+			 struct ssam_event *event)
+{
+	struct ssam_nf_head *nf_head;
+	int status, nf_ret;
+
+	/* Only request IDs in the event range map to a notifier head. */
+	if (!ssh_rqid_is_event(rqid)) {
+		dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid);
+		return;
+	}
+
+	nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+	nf_ret = ssam_nfblk_call_chain(nf_head, event);
+	status = ssam_notifier_to_errno(nf_ret);
+
+	/* Negative status: some callback reported an error for this event. */
+	if (status < 0) {
+		dev_err(dev, "event: error handling event: %d "
+			"(tc: 0x%02x, tid: 0x%02x, cid: 0x%02x, iid: 0x%02x)\n",
+			status, event->target_category, event->target_id,
+			event->command_id, event->instance_id);
+	}
+
+	/* No callback claimed the event: warn so the gap is visible. */
+	if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
+		dev_warn(dev, "event: unhandled event (rqid: 0x%02x, "
+			 "tc: 0x%02x, tid: 0x%02x, cid: 0x%02x, iid: 0x%02x)\n",
+			 rqid, event->target_category, event->target_id,
+			 event->command_id, event->instance_id);
+	}
+}
+
+/**
+ * ssam_nf_init() - Initialize the notifier system.
+ * @nf: The notifier system to initialize.
+ */
+static int ssam_nf_init(struct ssam_nf *nf)
+{
+	int i, status = 0;
+
+	/*
+	 * Initialize all notifier heads; bail out on the first failure.
+	 * Note: status must be pre-initialized, as it would otherwise be
+	 * read uninitialized below if SSH_NUM_EVENTS were zero.
+	 */
+	for (i = 0; i < SSH_NUM_EVENTS; i++) {
+		status = ssam_nf_head_init(&nf->head[i]);
+		if (status)
+			break;
+	}
+
+	if (status) {
+		/* Unwind the heads initialized before the failure. */
+		for (i = i - 1; i >= 0; i--)
+			ssam_nf_head_destroy(&nf->head[i]);
+
+		return status;
+	}
+
+	mutex_init(&nf->lock);
+	return 0;
+}
+
+/**
+ * ssam_nf_destroy() - Deinitialize the notifier system.
+ * @nf: The notifier system to deinitialize.
+ */
+static void ssam_nf_destroy(struct ssam_nf *nf)
+{
+	int i;
+
+	/* Tear down all notifier heads, then the lock (reverse of init). */
+	for (i = 0; i < SSH_NUM_EVENTS; i++)
+		ssam_nf_head_destroy(&nf->head[i]);
+
+	mutex_destroy(&nf->lock);
+}
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
+
+/**
+ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
+ * &struct ssam_event_item.
+ *
+ * This length has been chosen to accommodate standard touchpad and keyboard
+ * input events. Events with larger payloads will be allocated separately.
+ */
+#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
+
+static struct kmem_cache *ssam_event_item_cache;
+
+/**
+ * ssam_event_item_cache_init() - Initialize the event item cache.
+ */
+int ssam_event_item_cache_init(void)
+{
+	/* Cache objects hold the item header plus an inline payload area. */
+	const unsigned int size = sizeof(struct ssam_event_item)
+				  + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
+	const unsigned int align = __alignof__(struct ssam_event_item);
+	struct kmem_cache *cache;
+
+	cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
+	if (!cache)
+		return -ENOMEM;
+
+	ssam_event_item_cache = cache;
+	return 0;
+}
+
+/**
+ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
+ */
+void ssam_event_item_cache_destroy(void)
+{
+	/* Clear the pointer afterwards to guard against stale use. */
+	kmem_cache_destroy(ssam_event_item_cache);
+	ssam_event_item_cache = NULL;
+}
+
+/* Free callback for items allocated from the event item cache. */
+static void __ssam_event_item_free_cached(struct ssam_event_item *item)
+{
+	kmem_cache_free(ssam_event_item_cache, item);
+}
+
+/* Free callback for items allocated via plain kzalloc(). */
+static void __ssam_event_item_free_generic(struct ssam_event_item *item)
+{
+	kfree(item);
+}
+
+/**
+ * ssam_event_item_free() - Free the provided event item.
+ * @item: The event item to free.
+ */
+static inline void ssam_event_item_free(struct ssam_event_item *item)
+{
+	trace_ssam_event_item_free(item);
+	/* Dispatch to the allocator-specific free set in ssam_event_item_alloc(). */
+	item->ops.free(item);
+}
+
+/**
+ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
+ * @len: The event payload length.
+ * @flags: The flags used for allocation.
+ *
+ * Allocate an event item with the given payload size, preferring allocation
+ * from the event item cache if the payload is small enough (i.e. smaller than
+ * %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN).
+ */
+static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
+{
+	struct ssam_event_item *item;
+
+	if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
+		/*
+		 * Small payload: take the item from the dedicated cache.
+		 * Note: Use the caller-provided allocation flags; the
+		 * previous hard-coded GFP_KERNEL would be invalid from
+		 * atomic context.
+		 */
+		item = kmem_cache_alloc(ssam_event_item_cache, flags);
+		if (!item)
+			return NULL;
+
+		item->ops.free = __ssam_event_item_free_cached;
+	} else {
+		/* Large payload: fall back to a generic allocation. */
+		const size_t n = sizeof(struct ssam_event_item) + len;
+
+		item = kzalloc(n, flags);
+		if (!item)
+			return NULL;
+
+		item->ops.free = __ssam_event_item_free_generic;
+	}
+
+	item->event.length = len;
+
+	trace_ssam_event_item_alloc(item, len);
+	return item;
+}
+
+
+/**
+ * ssam_event_queue_push() - Push an event item to the event queue.
+ * @q: The event queue.
+ * @item: The item to add.
+ */
+static void ssam_event_queue_push(struct ssam_event_queue *q,
+				  struct ssam_event_item *item)
+{
+	/* Append under the queue lock; popped FIFO by the work function. */
+	spin_lock(&q->lock);
+	list_add_tail(&item->node, &q->head);
+	spin_unlock(&q->lock);
+}
+
+/**
+ * ssam_event_queue_pop() - Pop the next event item from the event queue.
+ * @q: The event queue.
+ *
+ * Returns and removes the next event item from the queue. Returns %NULL if
+ * there is no event item left.
+ */
+static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
+{
+	struct ssam_event_item *item;
+
+	/* Detach the oldest item (if any) under the queue lock. */
+	spin_lock(&q->lock);
+	item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
+	if (item)
+		list_del(&item->node);
+	spin_unlock(&q->lock);
+
+	return item;
+}
+
+/**
+ * ssam_event_queue_is_empty() - Check if the event queue is empty.
+ * @q: The event queue.
+ */
+static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
+{
+	bool result;
+
+	/* Snapshot the list state under the queue lock. */
+	spin_lock(&q->lock);
+	result = list_empty(&q->head);
+	spin_unlock(&q->lock);
+
+	return result;
+}
+
+/**
+ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
+ * @cplt: The completion system on which to look for the queue.
+ * @tid: The target ID of the queue.
+ * @rqid: The request ID representing the event ID for which to get the queue.
+ *
+ * Returns the event queue corresponding to the event type described by the
+ * given parameters. If the request ID does not represent an event, this
+ * function returns %NULL. If the target ID is not supported, this function
+ * will fall back to the default target ID (tid=1).
+ */
+static struct ssam_event_queue *ssam_cplt_get_event_queue(
+		struct ssam_cplt *cplt, u8 tid, u16 rqid)
+{
+	u16 event = ssh_rqid_to_event(rqid);
+	u16 tidx = ssh_tid_to_index(tid);
+
+	/* Non-event request IDs have no queue; this is a hard error. */
+	if (!ssh_rqid_is_event(rqid)) {
+		dev_err(cplt->dev, "event: unsupported request ID: 0x%04x\n", rqid);
+		return NULL;
+	}
+
+	/* Unknown target IDs fall back to the default target (index 0). */
+	if (!ssh_tid_is_valid(tid)) {
+		dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
+		tidx = 0;
+	}
+
+	return &cplt->event.target[tidx].queue[event];
+}
+
+/**
+ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
+ * @cplt: The completion system.
+ * @work: The work item to submit.
+ */
+static inline bool ssam_cplt_submit(struct ssam_cplt *cplt,
+				    struct work_struct *work)
+{
+	/* Thin wrapper: queue the work item on the completion workqueue. */
+	return queue_work(cplt->wq, work);
+}
+
+/**
+ * ssam_cplt_submit_event() - Submit an event to the completion system.
+ * @cplt: The completion system.
+ * @item: The event item to submit.
+ *
+ * Submits the event to the completion system by queuing it on the event item
+ * queue and queuing the respective event queue work item on the completion
+ * workqueue, which will eventually complete the event.
+ */
+static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
+				  struct ssam_event_item *item)
+{
+	struct ssam_event_queue *evq;
+
+	/* NULL here means the item's rqid is not an event ID. */
+	evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
+	if (!evq)
+		return -EINVAL;
+
+	/* Queue the item, then kick the per-queue work to process it. */
+	ssam_event_queue_push(evq, item);
+	ssam_cplt_submit(cplt, &evq->work);
+	return 0;
+}
+
+/**
+ * ssam_cplt_flush() - Flush the completion system.
+ * @cplt: The completion system.
+ *
+ * Flush the completion system by waiting until all currently submitted work
+ * items have been completed.
+ *
+ * Note: This function does not guarantee that all events will have been
+ * handled once this call terminates. In case of a larger number of
+ * to-be-completed events, the event queue work function may re-schedule its
+ * work item, which this flush operation will ignore.
+ *
+ * This operation is only intended to, during normal operation prior to
+ * shutdown, try to complete most events and requests to get them out of the
+ * system while the system is still fully operational. It does not aim to
+ * provide any guarantee that all of them have been handled.
+ */
+static void ssam_cplt_flush(struct ssam_cplt *cplt)
+{
+	/* Best-effort flush; see kernel-doc above for its limitations. */
+	flush_workqueue(cplt->wq);
+}
+
+/* Work function: drain an event queue, calling notifiers for each item. */
+static void ssam_event_queue_work_fn(struct work_struct *work)
+{
+	struct ssam_event_queue *queue;
+	struct ssam_event_item *item;
+	struct ssam_nf *nf;
+	struct device *dev;
+	int i;
+
+	queue = container_of(work, struct ssam_event_queue, work);
+	nf = &queue->cplt->event.notif;
+	dev = queue->cplt->dev;
+
+	// limit number of processed events to avoid livelocking
+	for (i = 0; i < 10; i++) {
+		item = ssam_event_queue_pop(queue);
+		if (item == NULL)
+			return;
+
+		ssam_nf_call(nf, dev, item->rqid, &item->event);
+		ssam_event_item_free(item);
+	}
+
+	/* Batch limit hit with items left: re-queue ourselves. */
+	if (!ssam_event_queue_is_empty(queue))
+		ssam_cplt_submit(queue->cplt, &queue->work);
+}
+
+/**
+ * ssam_event_queue_init() - Initialize an event queue.
+ * @cplt: The completion system on which the queue resides.
+ * @evq: The event queue to initialize.
+ */
+static void ssam_event_queue_init(struct ssam_cplt *cplt,
+				  struct ssam_event_queue *evq)
+{
+	/* Empty queue, back-reference to its completion system, work fn. */
+	evq->cplt = cplt;
+	spin_lock_init(&evq->lock);
+	INIT_LIST_HEAD(&evq->head);
+	INIT_WORK(&evq->work, ssam_event_queue_work_fn);
+}
+
+/**
+ * ssam_cplt_init() - Initialize completion system.
+ * @cplt: The completion system to initialize.
+ * @dev: The device used for logging.
+ */
+static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
+{
+	struct ssam_event_target *target;
+	int status, c, i;
+
+	cplt->dev = dev;
+
+	cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
+	if (!cplt->wq)
+		return -ENOMEM;
+
+	/* Initialize one event queue per (target, event) combination. */
+	for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
+		target = &cplt->event.target[c];
+
+		for (i = 0; i < ARRAY_SIZE(target->queue); i++)
+			ssam_event_queue_init(cplt, &target->queue[i]);
+	}
+
+	/* Notifier system is the last init step; roll back the wq on failure. */
+	status = ssam_nf_init(&cplt->event.notif);
+	if (status)
+		destroy_workqueue(cplt->wq);
+
+	return status;
+}
+
+/**
+ * ssam_cplt_destroy() - Deinitialize the completion system.
+ * @cplt: The completion system to deinitialize.
+ *
+ * Deinitialize the given completion system and ensure that all pending, i.e.
+ * yet-to-be-completed, event items and requests have been handled.
+ */
+static void ssam_cplt_destroy(struct ssam_cplt *cplt)
+{
+	/*
+	 * Note: destroy_workqueue ensures that all currently queued work will
+	 * be fully completed and the workqueue drained. This means that this
+	 * call will inherently also free any queued ssam_event_items, thus we
+	 * don't have to take care of that here explicitly.
+	 */
+	destroy_workqueue(cplt->wq);
+	/* Only tear down the notifier system once no work can run anymore. */
+	ssam_nf_destroy(&cplt->event.notif);
+}
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * ssam_controller_device() - Return the &struct device associated with this
+ * controller.
+ * @c: The controller for which to get the device.
+ */
+struct device *ssam_controller_device(struct ssam_controller *c)
+{
+	/* The controller's device is the one backing its transport layer. */
+	return ssh_rtl_get_device(&c->rtl);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_device);
+
+/* kref release callback: runs when the last controller reference is dropped. */
+static void __ssam_controller_release(struct kref *kref)
+{
+	struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
+
+	ssam_controller_destroy(ctrl);
+	kfree(ctrl);
+}
+
+/**
+ * ssam_controller_get() - Increment reference count of controller.
+ * @c: The controller.
+ *
+ * Return: Returns the controller provided as input.
+ */
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
+{
+	/* Returns input to allow "ctrl = ssam_controller_get(c)" chaining. */
+	kref_get(&c->kref);
+	return c;
+}
+EXPORT_SYMBOL_GPL(ssam_controller_get);
+
+/**
+ * ssam_controller_put() - Decrement reference count of controller.
+ * @c: The controller.
+ */
+void ssam_controller_put(struct ssam_controller *c)
+{
+	/* Last put destroys and frees the controller (see release callback). */
+	kref_put(&c->kref, __ssam_controller_release);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_put);
+
+
+/**
+ * ssam_controller_statelock() - Lock the controller against state transitions.
+ * @c: The controller to lock.
+ *
+ * Lock the controller against state transitions. Holding this lock guarantees
+ * that the controller will not transition between states, i.e. if the
+ * controller is in state "started", when this lock has been acquired, it will
+ * remain in this state at least until the lock has been released.
+ *
+ * Multiple clients may concurrently hold this lock. In other words: The
+ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
+ * Actions causing state transitions of the controller must be executed while
+ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
+ * and ssam_controller_unlock() for that).
+ *
+ * See ssam_controller_stateunlock() for the corresponding unlock function.
+ */
+void ssam_controller_statelock(struct ssam_controller *c)
+{
+	/* Read side of the controller state r/w-semaphore. */
+	down_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_statelock);
+
+/**
+ * ssam_controller_stateunlock() - Unlock controller state transitions.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_statelock() for the corresponding lock function.
+ */
+void ssam_controller_stateunlock(struct ssam_controller *c)
+{
+	/* Release the read side acquired via ssam_controller_statelock(). */
+	up_read(&c->lock);
+}
+EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
+
+/**
+ * ssam_controller_lock() - Acquire the main controller lock.
+ * @c: The controller to lock.
+ *
+ * This lock must be held for any state transitions, including transition to
+ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
+ * for more details on controller locking.
+ *
+ * See ssam_controller_unlock() for the corresponding unlock function.
+ */
+void ssam_controller_lock(struct ssam_controller *c)
+{
+	/* Write side of the state r/w-semaphore; excludes all statelocks. */
+	down_write(&c->lock);
+}
+
+/*
+ * ssam_controller_unlock() - Release the main controller lock.
+ * @c: The controller to unlock.
+ *
+ * See ssam_controller_lock() for the corresponding lock function.
+ */
+void ssam_controller_unlock(struct ssam_controller *c)
+{
+	/* Release the write side acquired via ssam_controller_lock(). */
+	up_write(&c->lock);
+}
+
+
+/*
+ * RTL callback for incoming events: copy the event into a freshly allocated
+ * item and hand it to the completion system for asynchronous processing.
+ */
+static void ssam_handle_event(struct ssh_rtl *rtl,
+			      const struct ssh_command *cmd,
+			      const struct ssam_span *data)
+{
+	struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
+	struct ssam_event_item *item;
+
+	/* On allocation failure the event is silently dropped. */
+	item = ssam_event_item_alloc(data->len, GFP_KERNEL);
+	if (!item)
+		return;
+
+	item->rqid = get_unaligned_le16(&cmd->rqid);
+	item->event.target_category = cmd->tc;
+	item->event.target_id = cmd->tid_in;
+	item->event.command_id = cmd->cid;
+	item->event.instance_id = cmd->iid;
+	memcpy(&item->event.data[0], data->ptr, data->len);
+
+	/* Submission only fails for non-event rqids; that would be a bug. */
+	WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item));
+}
+
+/* Request transport layer callbacks: route incoming events upward. */
+static const struct ssh_rtl_ops ssam_rtl_ops = {
+	.handle_event = ssam_handle_event,
+};
+
+
+static bool ssam_notifier_empty(struct ssam_controller *ctrl);
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
+
+
+#define SSAM_SSH_DSM_REVISION 0
+#define SSAM_SSH_DSM_NOTIF_D0 8
+static const guid_t SSAM_SSH_DSM_UUID = GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
+ 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
+
+/**
+ * ssam_device_caps_load_from_acpi() - Load controller capabilities from _DSM.
+ * @handle: The handle of the ACPI controller/SSH device.
+ * @caps: Where to store the capabilities in.
+ *
+ * Initializes the given controller capabilities with default values, then
+ * checks and, if the respective _DSM functions are available, loads the
+ * actual capabilities from the _DSM.
+ */
+static int ssam_device_caps_load_from_acpi(acpi_handle handle,
+					   struct ssam_device_caps *caps)
+{
+	union acpi_object *obj;
+	u64 funcs = 0;
+	int i;
+
+	// set defaults
+	caps->notif_display = true;
+	caps->notif_d0exit = false;
+
+	/* Without a _DSM, the defaults above are all we have. */
+	if (!acpi_has_method(handle, "_DSM"))
+		return 0;
+
+	// get function availability bitfield
+	obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID, 0, 0, NULL,
+				      ACPI_TYPE_BUFFER);
+	if (!obj)
+		return -EFAULT;
+
+	/* Fold up to eight buffer bytes into a little-endian u64 bitfield. */
+	for (i = 0; i < obj->buffer.length && i < 8; i++)
+		funcs |= (((u64)obj->buffer.pointer[i]) << (i * 8));
+
+	ACPI_FREE(obj);
+
+	// D0 exit/entry notification
+	if (funcs & BIT(SSAM_SSH_DSM_NOTIF_D0)) {
+		obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_UUID,
+				SSAM_SSH_DSM_REVISION, SSAM_SSH_DSM_NOTIF_D0,
+				NULL, ACPI_TYPE_INTEGER);
+		if (!obj)
+			return -EFAULT;
+
+		caps->notif_d0exit = !!obj->integer.value;
+		ACPI_FREE(obj);
+	}
+
+	return 0;
+}
+
+/**
+ * ssam_controller_init() - Initialize SSAM controller.
+ * @ctrl: The controller to initialize.
+ * @serdev: The serial device representing the underlying data transport.
+ *
+ * Initializes the given controller. Does neither start receiver nor
+ * transmitter threads. After this call, the controller has to be hooked up to
+ * the serdev core separately via &struct serdev_device_ops, relaying calls to
+ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
+ * controller has been hooked up, transmitter and receiver threads may be
+ * started via ssam_controller_start(). These setup steps need to be completed
+ * before controller can be used for requests.
+ */
+int ssam_controller_init(struct ssam_controller *ctrl,
+			 struct serdev_device *serdev)
+{
+	acpi_handle handle = ACPI_HANDLE(&serdev->dev);
+	int status;
+
+	init_rwsem(&ctrl->lock);
+	kref_init(&ctrl->kref);
+
+	/* Query device capabilities (display/D0 notifications) via ACPI. */
+	status = ssam_device_caps_load_from_acpi(handle, &ctrl->caps);
+	if (status)
+		return status;
+
+	dev_dbg(&serdev->dev, "device capabilities:\n");
+	dev_dbg(&serdev->dev, "  notif_display: %u\n", ctrl->caps.notif_display);
+	dev_dbg(&serdev->dev, "  notif_d0exit:  %u\n", ctrl->caps.notif_d0exit);
+
+	ssh_seq_reset(&ctrl->counter.seq);
+	ssh_rqid_reset(&ctrl->counter.rqid);
+
+	// initialize event/request completion system
+	status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
+	if (status)
+		return status;
+
+	// initialize request and packet transmission layers
+	status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
+	if (status) {
+		ssam_cplt_destroy(&ctrl->cplt);
+		return status;
+	}
+
+	// update state
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
+	return 0;
+}
+
+/**
+ * ssam_controller_start() - Start the receiver and transmitter threads of the
+ * controller.
+ * @ctrl: The controller.
+ *
+ * Note: When this function is called, the controller should be properly
+ * hooked up to the serdev core via &struct serdev_device_ops. Please refer to
+ * ssam_controller_init() for more details on controller initialization.
+ */
+int ssam_controller_start(struct ssam_controller *ctrl)
+{
+	int status;
+
+	/* Starting is only valid directly after initialization. */
+	if (READ_ONCE(ctrl->state) != SSAM_CONTROLLER_INITIALIZED)
+		return -EINVAL;
+
+	status = ssh_rtl_tx_start(&ctrl->rtl);
+	if (status)
+		return status;
+
+	/* If the receiver fails to start, shut the transmitter down again. */
+	status = ssh_rtl_rx_start(&ctrl->rtl);
+	if (status) {
+		ssh_rtl_tx_flush(&ctrl->rtl);
+		return status;
+	}
+
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+	return 0;
+}
+
+/**
+ * ssam_controller_shutdown() - Shut down the controller.
+ * @ctrl: The controller.
+ *
+ * Shuts down the controller by flushing all pending requests and stopping the
+ * transmitter and receiver threads. All requests submitted after this call
+ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
+ * is safe to use in parallel with ongoing request submission.
+ *
+ * In the course of this shutdown procedure, all currently registered
+ * notifiers will be unregistered. It is, however, strongly recommended to not
+ * rely on this behavior, and instead the party registering the notifier should
+ * unregister it before the controller gets shut down, e.g. via the SSAM bus
+ * which guarantees client devices to be removed before a shutdown.
+ *
+ * Note that events may still be pending after this call, but due to the
+ * notifiers being unregistered, they will be dropped when the controller is
+ * subsequently being destroyed via ssam_controller_destroy().
+ */
+void ssam_controller_shutdown(struct ssam_controller *ctrl)
+{
+	enum ssam_controller_state s = READ_ONCE(ctrl->state);
+	int status;
+
+	/* Nothing to do if the controller never ran or already stopped. */
+	if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
+		return;
+
+	// try to flush pending events and requests while everything still works
+	status = ssh_rtl_flush(&ctrl->rtl, msecs_to_jiffies(5000));
+	if (status) {
+		ssam_err(ctrl, "failed to flush request transmission layer: %d\n",
+			 status);
+	}
+
+	// try to flush out all currently completing requests and events
+	ssam_cplt_flush(&ctrl->cplt);
+
+	/*
+	 * We expect all notifiers to have been removed by the respective client
+	 * driver that set them up at this point. If this warning occurs, some
+	 * client driver has not done that...
+	 */
+	WARN_ON(!ssam_notifier_empty(ctrl));
+
+	/*
+	 * Nevertheless, we should still take care of drivers that don't behave
+	 * well. Thus disable all enabled events, unregister all notifiers.
+	 */
+	ssam_notifier_unregister_all(ctrl);
+
+	// cancel rem. requests, ensure no new ones can be queued, stop threads
+	ssh_rtl_tx_flush(&ctrl->rtl);
+	ssh_rtl_shutdown(&ctrl->rtl);
+
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
+	/* Drop the serdev reference; the transport is unusable from here on. */
+	ctrl->rtl.ptl.serdev = NULL;
+}
+
+/**
+ * ssam_controller_destroy() - Destroy the controller and free its resources.
+ * @ctrl: The controller.
+ *
+ * Ensures that all resources associated with the controller get freed. This
+ * function should only be called after the controller has been stopped via
+ * ssam_controller_shutdown().
+ */
+void ssam_controller_destroy(struct ssam_controller *ctrl)
+{
+	if (READ_ONCE(ctrl->state) == SSAM_CONTROLLER_UNINITIALIZED)
+		return;
+
+	/* Destroying a controller that has not been stopped is a bug. */
+	WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
+
+	/*
+	 * Note: New events could still have been received after the previous
+	 * flush in ssam_controller_shutdown, before the request transport layer
+	 * has been shut down. At this point, after the shutdown, we can be sure
+	 * that no new events will be queued. The call to ssam_cplt_destroy will
+	 * ensure that those remaining are being completed and freed.
+	 */
+
+	// actually free resources
+	ssam_cplt_destroy(&ctrl->cplt);
+	ssh_rtl_destroy(&ctrl->rtl);
+
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
+}
+
+/**
+ * ssam_controller_suspend() - Suspend the controller.
+ * @ctrl: The controller to suspend.
+ *
+ * Marks the controller as suspended. Note that display-off and D0-exit
+ * notifications have to be sent manually before transitioning the controller
+ * into the suspended state via this function.
+ *
+ * See ssam_controller_resume() for the corresponding resume function.
+ *
+ * Return: Returns %-EINVAL if the controller is currently not in the
+ * "started" state.
+ */
+int ssam_controller_suspend(struct ssam_controller *ctrl)
+{
+	/* State transitions require the main (write) controller lock. */
+	ssam_controller_lock(ctrl);
+
+	if (READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED) {
+		ssam_controller_unlock(ctrl);
+		return -EINVAL;
+	}
+
+	ssam_dbg(ctrl, "pm: suspending controller\n");
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
+
+	ssam_controller_unlock(ctrl);
+	return 0;
+}
+
+/**
+ * ssam_controller_resume() - Resume the controller from suspend.
+ * @ctrl: The controller to resume.
+ *
+ * Resume the controller from the suspended state it was put into via
+ * ssam_controller_suspend(). This function does not issue display-on and
+ * D0-entry notifications. If required, those have to be sent manually after
+ * this call.
+ *
+ * Return: Returns %-EINVAL if the controller is currently not suspended.
+ */
+int ssam_controller_resume(struct ssam_controller *ctrl)
+{
+	/* State transitions require the main (write) controller lock. */
+	ssam_controller_lock(ctrl);
+
+	if (READ_ONCE(ctrl->state) != SSAM_CONTROLLER_SUSPENDED) {
+		ssam_controller_unlock(ctrl);
+		return -EINVAL;
+	}
+
+	ssam_dbg(ctrl, "pm: resuming controller\n");
+	WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
+
+	ssam_controller_unlock(ctrl);
+	return 0;
+}
+
+
+/* -- Top-level request interface ------------------------------------------- */
+
+/**
+ * ssam_request_write_data() - Construct and write SAM request message to
+ * buffer.
+ * @buf: The buffer to write the data to.
+ * @ctrl: The controller via which the request will be sent.
+ * @spec: The request data and specification.
+ *
+ * Constructs a SAM/SSH request message and writes it to the provided buffer.
+ * The request and transport counters, specifically RQID and SEQ, will be set
+ * in this call. These counters are obtained from the controller. It is thus
+ * only valid to send the resulting message via the controller specified here.
+ *
+ * For calculation of the required buffer size, refer to the
+ * ``SSH_COMMAND_MESSAGE_LENGTH()`` macro.
+ *
+ * Return: Returns the number of bytes used in the buffer on success. Returns
+ * %-EINVAL if the payload length provided in the request specification is too
+ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
+ * is too small.
+ */
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+				struct ssam_controller *ctrl,
+				struct ssam_request *spec)
+{
+	struct msgbuf msgb;
+	u16 rqid;
+	u8 seq;
+
+	/* Reject payloads the protocol cannot carry. */
+	if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
+		return -EINVAL;
+
+	/* Reject buffers too small for the full message. */
+	if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
+		return -EINVAL;
+
+	/* Draw fresh SEQ/RQID counters; ties the message to this controller. */
+	msgb_init(&msgb, buf->ptr, buf->len);
+	seq = ssh_seq_next(&ctrl->counter.seq);
+	rqid = ssh_rqid_next(&ctrl->counter.rqid);
+	msgb_push_cmd(&msgb, seq, rqid, spec);
+
+	return msgb_bytes_used(&msgb);
+}
+EXPORT_SYMBOL_GPL(ssam_request_write_data);
+
+
+/* Completion callback: record status and copy response data, if any. */
+static void ssam_request_sync_complete(struct ssh_request *rqst,
+				       const struct ssh_command *cmd,
+				       const struct ssam_span *data, int status)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+	struct ssam_request_sync *r;
+
+	r = container_of(rqst, struct ssam_request_sync, base);
+	r->status = status;
+
+	/* Start with an empty response; filled in below on success. */
+	if (r->resp)
+		r->resp->length = 0;
+
+	if (status) {
+		rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
+		return;
+	}
+
+	if (!data)	// handle requests without a response
+		return;
+
+	/* Response arrived but the caller provided no buffer: drop it. */
+	if (!r->resp || !r->resp->pointer) {
+		if (data->len)
+			rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
+		return;
+	}
+
+	/* Response larger than the caller's buffer: fail with -ENOSPC. */
+	if (data->len > r->resp->capacity) {
+		rtl_err(rtl, "rsp: response buffer too small, capacity: %zu bytes,"
+			" got: %zu bytes\n", r->resp->capacity, data->len);
+		r->status = -ENOSPC;
+		return;
+	}
+
+	r->resp->length = data->len;
+	memcpy(r->resp->pointer, data->ptr, data->len);
+}
+
+/* Release callback: wake everyone waiting in ssam_request_sync_wait(). */
+static void ssam_request_sync_release(struct ssh_request *rqst)
+{
+	complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
+}
+
+/* Request callbacks used by all synchronous requests. */
+static const struct ssh_request_ops ssam_request_sync_ops = {
+	.release = ssam_request_sync_release,
+	.complete = ssam_request_sync_complete,
+};
+
+
+/**
+ * ssam_request_sync_alloc() - Allocate a synchronous request.
+ * @payload_len: The length of the request payload.
+ * @flags: Flags used for allocation.
+ * @rqst: Where to store the pointer to the allocated request.
+ * @buffer: Where to store the buffer descriptor for the message buffer of
+ * the request.
+ *
+ * Allocates a synchronous request with corresponding message buffer. The
+ * request still needs to be initialized via ssam_request_sync_init() before
+ * it can be submitted, and the message buffer data must still be set to the
+ * returned buffer via ssam_request_sync_set_data() after it has been filled,
+ * if need be with adjusted message length.
+ *
+ * After use, the request and its corresponding message buffer should be freed
+ * via ssam_request_sync_free(). The buffer must not be freed separately.
+ */
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+			    struct ssam_request_sync **rqst,
+			    struct ssam_span *buffer)
+{
+	const size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
+	struct ssam_request_sync *r;
+
+	/* Request struct and message buffer share a single allocation. */
+	r = kzalloc(sizeof(*r) + msglen, flags);
+	if (!r)
+		return -ENOMEM;
+
+	/* The message buffer starts directly behind the request struct. */
+	buffer->ptr = (u8 *)(r + 1);
+	buffer->len = msglen;
+	*rqst = r;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
+
+/**
+ * ssam_request_sync_free() - Free a synchronous request.
+ * @rqst: The request to free.
+ *
+ * Free a synchronous request and its corresponding buffer allocated with
+ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
+ * or via any other function.
+ *
+ * Warning: The caller must ensure that the request is not in use any more.
+ * I.e. the caller must ensure that it has the only reference to the request
+ * and the request is not currently pending. This means that the caller has
+ * either never submitted the request, request submission has failed, or the
+ * caller has waited until the submitted request has been completed via
+ * ssam_request_sync_wait().
+ */
+void ssam_request_sync_free(struct ssam_request_sync *rqst)
+{
+	/* Request and its message buffer are one allocation; one kfree(). */
+	kfree(rqst);
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_free);
+
+/**
+ * ssam_request_sync_init() - Initialize a synchronous request struct.
+ * @rqst: The request to initialize.
+ * @flags: The request flags.
+ *
+ * Initializes the given request struct. Does not initialize the request
+ * message data. This has to be done explicitly after this call via
+ * ssam_request_sync_set_data() and the actual message data has to be written
+ * via ssam_request_write_data().
+ */
+void ssam_request_sync_init(struct ssam_request_sync *rqst,
+			    enum ssam_request_flags flags)
+{
+	/* Hook up the shared sync callbacks; response buffer is set later. */
+	ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
+	init_completion(&rqst->comp);
+	rqst->resp = NULL;
+	rqst->status = 0;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_init);
+
+/**
+ * ssam_request_sync_submit() - Submit a synchronous request.
+ * @ctrl: The controller with which to submit the request.
+ * @rqst: The request to submit.
+ *
+ * Submit a synchronous request. The request has to be initialized and
+ * properly set up, including response buffer (may be %NULL if no response is
+ * expected) and command message data. This function does not wait for the
+ * request to be completed.
+ *
+ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
+ * that the request has been completed before the response data can be
+ * accessed and/or the request can be freed. On failure, the request may
+ * immediately be freed.
+ *
+ * This function may only be used if the controller is active, i.e. has been
+ * initialized and not suspended.
+ */
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+			     struct ssam_request_sync *rqst)
+{
+	int status;
+
+	/*
+	 * This is only a superficial check. In general, the caller needs to
+	 * ensure that the controller is initialized and is not (and does not
+	 * get) suspended during use, i.e. until the request has been completed
+	 * (if _absolutely_ necessary, by use of ssam_controller_statelock/
+	 * ssam_controller_stateunlock, but something like ssam_client_link
+	 * should be preferred as this needs to last until the request has been
+	 * completed).
+	 *
+	 * Note that it is actually safe to use this function while the
+	 * controller is in the process of being shut down (as ssh_rtl_submit
+	 * is safe with regards to this), but it is generally discouraged to do
+	 * so.
+	 */
+	if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
+		/* Drop our reference even on the early-exit path. */
+		ssh_request_put(&rqst->base);
+		return -ENXIO;
+	}
+
+	/* The transport layer holds its own reference; drop ours afterwards. */
+	status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
+	ssh_request_put(&rqst->base);
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
+
+/**
+ * ssam_request_sync() - Execute a synchronous request.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ *
+ * Allocates a synchronous request with its message data buffer on the heap
+ * via ssam_request_sync_alloc(), fully initializes it via the provided request
+ * specification, submits it, and finally waits for its completion before
+ * freeing it and returning its status.
+ *
+ * Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
+ struct ssam_response *rsp)
+{
+ struct ssam_request_sync *rqst;
+ struct ssam_span buf;
+ ssize_t len;
+ int status;
+
+ // prevent overflow, allows us to skip checks later on
+ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
+ ssam_err(ctrl, "rqst: request payload too large\n");
+ return -EINVAL;
+ }
+
+ status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
+ if (status)
+ return status;
+
+ ssam_request_sync_init(rqst, spec->flags);
+ ssam_request_sync_set_resp(rqst, rsp);
+
+ len = ssam_request_write_data(&buf, ctrl, spec);
+ if (len < 0)
+ return len;
+
+ ssam_request_sync_set_data(rqst, buf.ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, rqst);
+ if (!status)
+ status = ssam_request_sync_wait(rqst);
+
+ ssam_request_sync_free(rqst);
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync);
+
+/**
+ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
+ * provided buffer as backend for the message buffer.
+ * @ctrl: The controller via which the request will be submitted.
+ * @spec: The request specification and payload.
+ * @rsp: The response buffer.
+ * @buf: The buffer for the request message data.
+ *
+ * Allocates a synchronous request struct on the stack, fully initializes it
+ * using the provided buffer as message data buffer, submits it, and then
+ * waits for its completion before returning its status. The
+ * ``SSH_COMMAND_MESSAGE_LENGTH()`` macro can be used to compute the required
+ * message buffer size.
+ *
+ * This function does essentially the same as ssam_request_sync(), but instead
+ * of dynamically allocating the request and message data buffer, it uses the
+ * provided message data buffer and stores the (small) request struct on the
+ * stack.
+ *
+ * Returns the status of the request or any failure during setup.
+ */
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf)
+{
+ struct ssam_request_sync rqst;
+ ssize_t len;
+ int status;
+
+ // prevent overflow, allows us to skip checks later on
+ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE) {
+ ssam_err(ctrl, "rqst: request payload too large\n");
+ return -EINVAL;
+ }
+
+ ssam_request_sync_init(&rqst, spec->flags);
+ ssam_request_sync_set_resp(&rqst, rsp);
+
+ len = ssam_request_write_data(buf, ctrl, spec);
+ if (len < 0)
+ return len;
+
+ ssam_request_sync_set_data(&rqst, buf->ptr, len);
+
+ status = ssam_request_sync_submit(ctrl, &rqst);
+ if (!status)
+ status = ssam_request_sync_wait(&rqst);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
+
+
+/* -- Internal SAM requests. ------------------------------------------------ */
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x13,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x15,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x16,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x33,
+ .instance_id = 0x00,
+});
+
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
+ .target_category = SSAM_SSH_TC_SAM,
+ .target_id = 0x01,
+ .command_id = 0x34,
+ .instance_id = 0x00,
+});
+
+/**
+ * ssam_ssh_event_enable() - Enable SSH event.
+ * @ctrl: The controller for which to enable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event.
+ * @id: The event identifier.
+ * @flags: The event flags.
+ *
+ * This is a wrapper for the raw SAM request to enable an event, thus it does
+ * not handle reference counting for enable/disable of events. If an event
+ * has already been enabled, the EC will ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request or %-EPROTO if the
+ * request response indicates a failure.
+ */
+static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ struct ssh_notification_params params;
+ struct ssam_request rqst;
+ struct ssam_response result;
+ int status;
+
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ u8 buf[1] = { 0x00 };
+
+ // only allow RQIDs that lie within event spectrum
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ params.target_category = id.target_category;
+ params.instance_id = id.instance;
+ params.flags = flags;
+ put_unaligned_le16(rqid, &params.request_id);
+
+ rqst.target_category = reg.target_category;
+ rqst.target_id = reg.target_id;
+ rqst.command_id = reg.cid_enable;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(params);
+ rqst.payload = (u8 *)&params;
+
+ result.capacity = ARRAY_SIZE(buf);
+ result.length = 0;
+ result.pointer = buf;
+
+ status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
+ if (status) {
+ ssam_err(ctrl, "failed to enable event source (tc: 0x%02x, "
+ "iid: 0x%02x, reg: 0x%02x)\n", id.target_category,
+ id.instance, reg.target_category);
+ }
+
+ if (buf[0] != 0x00) {
+ ssam_err(ctrl, "unexpected result while enabling event source: "
+ "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
+ buf[0], id.target_category, id.instance,
+ reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+
+}
+
+/**
+ * ssam_ssh_event_disable() - Disable SSH event.
+ * @ctrl: The controller for which to disable the event.
+ * @reg: The event registry describing what request to use for enabling and
+ * disabling the event (must be same as used when enabling the event).
+ * @id: The event identifier.
+ * @flags: The event flags (likely ignored for disabling of events).
+ *
+ * This is a wrapper for the raw SAM request to disable an event, thus it does
+ * not handle reference counting for enable/disable of events. If an event has
+ * already been disabled, the EC will ignore this request.
+ *
+ * Return: Returns the status of the executed SAM request or %-EPROTO if the
+ * request response indicates a failure.
+ */
+static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags)
+{
+ struct ssh_notification_params params;
+ struct ssam_request rqst;
+ struct ssam_response result;
+ int status;
+
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
+ u8 buf[1] = { 0x00 };
+
+ // only allow RQIDs that lie within event spectrum
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ params.target_category = id.target_category;
+ params.instance_id = id.instance;
+ params.flags = flags;
+ put_unaligned_le16(rqid, &params.request_id);
+
+ rqst.target_category = reg.target_category;
+ rqst.target_id = reg.target_id;
+ rqst.command_id = reg.cid_disable;
+ rqst.instance_id = 0x00;
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
+ rqst.length = sizeof(params);
+ rqst.payload = (u8 *)&params;
+
+ result.capacity = ARRAY_SIZE(buf);
+ result.length = 0;
+ result.pointer = buf;
+
+ status = ssam_request_sync_onstack(ctrl, &rqst, &result, sizeof(params));
+ if (status) {
+ ssam_err(ctrl, "failed to disable event source (tc: 0x%02x, "
+ "iid: 0x%02x, reg: 0x%02x)\n", id.target_category,
+ id.instance, reg.target_category);
+ }
+
+ if (buf[0] != 0x00) {
+ ssam_err(ctrl, "unexpected result while disabling event source: "
+ "0x%02x (tc: 0x%02x, iid: 0x%02x, reg: 0x%02x)\n",
+ buf[0], id.target_category, id.instance,
+ reg.target_category);
+ return -EPROTO;
+ }
+
+ return status;
+}
+
+
+/* -- Wrappers for internal SAM requests. ----------------------------------- */
+
+/**
+ * ssam_log_firmware_version() - Log SAM/EC firmware version to kernel log.
+ * @ctrl: The controller.
+ */
+int ssam_log_firmware_version(struct ssam_controller *ctrl)
+{
+ __le32 __version;
+ u32 version, a, b, c;
+ int status;
+
+ status = ssam_ssh_get_firmware_version(ctrl, &__version);
+ if (status)
+ return status;
+
+ version = le32_to_cpu(__version);
+ a = (version >> 24) & 0xff;
+ b = ((version >> 8) & 0xffff);
+ c = version & 0xff;
+
+ ssam_info(ctrl, "SAM controller version: %u.%u.%u\n", a, b, c);
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
+ * off.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned off and the driver may enter
+ * a lower-power state. This will prevent events from being sent directly.
+ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
+ * as there are pending events. The events then need to be manually released,
+ * one by one, via the GPIO callback request. All pending events accumulated
+ * during this state can also be released by issuing the display-on
+ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
+ * the GPIO.
+ *
+ * On some devices, specifically ones with an integrated keyboard, the keyboard
+ * backlight will be turned off by this call.
+ *
+ * This function will only send the display-off notification command if
+ * display notifications are supported by the EC. Currently all known devices
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
+ *
+ * Return: Returns the status of the executed SAM command, zero on success or
+ * if no request has been executed, or %-EPROTO if an unexpected response has
+ * been received.
+ */
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.notif_display)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying display off\n");
+
+ status = ssam_ssh_notif_display_off(ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-off notification: 0x%02x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
+ * @ctrl: The controller.
+ *
+ * Notify the EC that the display has been turned back on and the driver has
+ * exited its lower-power state. This notification is the counterpart to the
+ * display-off notification sent via ssam_ctrl_notif_display_off() and will
+ * reverse its effects, including resetting events to their default behavior.
+ *
+ * This function will only send the display-on notification command if display
+ * notifications are supported by the EC. Currently all known devices support
+ * these notifications.
+ *
+ * See ssam_ctrl_notif_display_off() for more details.
+ *
+ * Return: Returns the status of the executed SAM command, zero on success or
+ * if no request has been executed, or %-EPROTO if an unexpected response has
+ * been received.
+ */
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.notif_display)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying display on\n");
+
+ status = ssam_ssh_notif_display_on(ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from display-on notification: 0x%02x\n",
+ response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver prepares to exit the D0 power state in
+ * favor of a lower-power state. Exact effects of this function related to the
+ * EC are currently unknown.
+ *
+ * This function will only send the D0-exit notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
+ *
+ * Return: Returns the status of the executed SAM command, zero on success or
+ * if no request has been executed, or %-EPROTO if an unexpected response has
+ * been received.
+ */
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.notif_d0exit)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 exit\n");
+
+ status = ssam_ssh_notif_d0_exit(ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-exit notification:"
+ " 0x%02x\n", response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
+ * power state.
+ * @ctrl: The controller
+ *
+ * Notifies the EC that the driver has exited a lower-power state and entered
+ * the D0 power state. Exact effects of this function related to the EC are
+ * currently unknown.
+ *
+ * This function will only send the D0-entry notification command if D0-state
+ * notifications are supported by the EC. Only newer Surface generations
+ * support these notifications.
+ *
+ * See ssam_ctrl_notif_d0_exit() for more details.
+ *
+ * Return: Returns the status of the executed SAM command, zero on success or
+ * if no request has been executed, or %-EPROTO if an unexpected response has
+ * been received.
+ */
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
+{
+ int status;
+ u8 response;
+
+ if (!ctrl->caps.notif_d0exit)
+ return 0;
+
+ ssam_dbg(ctrl, "pm: notifying D0 entry\n");
+
+ status = ssam_ssh_notif_d0_entry(ctrl, &response);
+ if (status)
+ return status;
+
+ if (response != 0) {
+ ssam_err(ctrl, "unexpected response from D0-entry notification:"
+ " 0x%02x\n", response);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+
+/* -- Top-level event registry interface. ----------------------------------- */
+
+/**
+ * ssam_notifier_register() - Register an event notifier.
+ * @ctrl: The controller to register the notifier on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier and increment the usage counter of the
+ * associated SAM event. If the event was previously not enabled, it will be
+ * enabled during this call.
+ *
+ * Return: Returns zero on success, %-ENOSPC if there have already been
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
+ * registered, %-ENOMEM if the corresponding event entry could not be
+ * allocated. If this is the first time that a notifier block is registered
+ * for the specific associated event, returns the status of the event enable
+ * EC command.
+ */
+int ssam_notifier_register(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
+ if (IS_ERR(entry)) {
+ mutex_unlock(&nf->lock);
+ return PTR_ERR(entry);
+ }
+
+ ssam_dbg(ctrl, "enabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x,"
+ " rc: %d)\n", n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance,
+ entry->refcount);
+
+ status = __ssam_nfblk_insert(nf_head, &n->base);
+ if (status) {
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (entry->refcount == 0)
+ kfree(entry);
+
+ mutex_unlock(&nf->lock);
+ return status;
+ }
+
+ if (entry->refcount == 1) {
+ status = ssam_ssh_event_enable(ctrl, n->event.reg, n->event.id,
+ n->event.flags);
+ if (status) {
+ __ssam_nfblk_remove(nf_head, &n->base);
+ kfree(ssam_nf_refcount_dec(nf, n->event.reg, n->event.id));
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+ return status;
+ }
+
+ entry->flags = n->event.flags;
+
+ } else if (entry->flags != n->event.flags) {
+ ssam_warn(ctrl, "inconsistent flags when enabling event: got 0x%02x,"
+ " expected 0x%02x (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x)",
+ n->event.flags, entry->flags, n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance);
+ }
+
+ mutex_unlock(&nf->lock);
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(ssam_notifier_register);
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier and decrement the usage counter of the
+ * associated SAM event. If the usage counter reaches zero, the event will be
+ * disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event disable EC command.
+ */
+int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
+ struct ssam_notifier_block **link;
+ struct ssam_nf_refcount_entry *entry;
+ struct ssam_nf_head *nf_head;
+ struct ssam_nf *nf;
+ int status = 0;
+
+ if (!ssh_rqid_is_event(rqid))
+ return -EINVAL;
+
+ nf = &ctrl->cplt.event.notif;
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
+
+ mutex_lock(&nf->lock);
+
+ link = __ssam_nfblk_find_link(nf_head, &n->base);
+ if (!link) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
+ if (WARN_ON(!entry)) {
+ mutex_unlock(&nf->lock);
+ return -ENOENT;
+ }
+
+ ssam_dbg(ctrl, "disabling event (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x,"
+ " rc: %d)\n", n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance,
+ entry->refcount);
+
+ if (entry->flags != n->event.flags) {
+ ssam_warn(ctrl, "inconsistent flags when enabling event: got 0x%02x,"
+ " expected 0x%02x (reg: 0x%02x, tc: 0x%02x, iid: 0x%02x)",
+ n->event.flags, entry->flags, n->event.reg.target_category,
+ n->event.id.target_category, n->event.id.instance);
+ }
+
+ if (entry->refcount == 0) {
+ status = ssam_ssh_event_disable(ctrl, n->event.reg, n->event.id,
+ n->event.flags);
+ kfree(entry);
+ }
+
+ __ssam_nfblk_erase(link);
+ mutex_unlock(&nf->lock);
+ synchronize_srcu(&nf_head->srcu);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
+
+/**
+ * ssam_notifier_disable_registered() - Disable events for all registered
+ * notifiers.
+ * @ctrl: The controller for which to disable the notifiers/events.
+ *
+ * Disables events for all currently registered notifiers. In case of an error
+ * (EC command failing), all previously disabled events will be restored and
+ * the error code returned.
+ *
+ * This function is intended to disable all events prior to hibernation entry.
+ * See ssam_notifier_restore_registered() to restore/re-enable all events
+ * disabled with this function.
+ *
+ * Note that this function will not disable events for notifiers registered
+ * after calling this function. It should thus be made sure that no new
+ * notifiers are going to be added after this call and before the corresponding
+ * call to ssam_notifier_restore_registered().
+ *
+ * Returns zero on success. In case of failure returns the error code returned
+ * by the failed EC command to disable an event.
+ */
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+ int status;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n != NULL; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ status = ssam_ssh_event_disable(ctrl, e->key.reg,
+ e->key.id, e->flags);
+ if (status)
+ goto err;
+ }
+ mutex_unlock(&nf->lock);
+
+ return 0;
+
+err:
+ for (n = rb_prev(n); n != NULL; n = rb_prev(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+
+ return status;
+}
+
+/**
+ * ssam_notifier_restore_registered() - Restore/re-enable events for all
+ * registered notifiers.
+ * @ctrl: The controller for which to restore the notifiers/events.
+ *
+ * Restores/re-enables all events for which notifiers have been registered on
+ * the given controller. In case of a failure, the error is logged and the
+ * function continues to try and enable the remaining events.
+ *
+ * This function is intended to restore/re-enable all registered events after
+ * hibernation. See ssam_notifier_disable_registered() for the counter part
+ * disabling the events and more details.
+ */
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct rb_node *n;
+
+ mutex_lock(&nf->lock);
+ for (n = rb_first(&nf->refcount); n != NULL; n = rb_next(n)) {
+ struct ssam_nf_refcount_entry *e;
+
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
+
+ // ignore errors, will get logged in call
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
+ }
+ mutex_unlock(&nf->lock);
+}
+
+/**
+ * ssam_notifier_empty() - Check if there are any registered notifiers.
+ * @ctrl: The controller to check on.
+ *
+ * Returns true if there are currently no notifiers registered on the
+ * controller, false otherwise.
+ */
+static bool ssam_notifier_empty(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ bool result;
+
+ mutex_lock(&nf->lock);
+ result = ssam_nf_refcount_empty(nf);
+ mutex_unlock(&nf->lock);
+
+ return result;
+}
+
+/**
+ * ssam_notifier_unregister_all() - Unregister all currently registered
+ * notifiers.
+ * @ctrl: The controller to unregister the notifiers on.
+ *
+ * Unregisters all currently registered notifiers. This function is used to
+ * ensure that all notifiers will be unregistered and associated
+ * entries/resources freed when the controller is being shut down.
+ */
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
+{
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
+ struct ssam_nf_refcount_entry *e, *n;
+
+ mutex_lock(&nf->lock);
+ rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
+ // ignore errors, will get logged in call
+ ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
+ kfree(e);
+ }
+ nf->refcount = RB_ROOT;
+ mutex_unlock(&nf->lock);
+}
+
+
+/* -- Wakeup IRQ. ----------------------------------------------------------- */
+
+static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
+{
+ struct ssam_controller *ctrl = dev_id;
+
+ ssam_dbg(ctrl, "pm: wake irq triggered\n");
+
+ /*
+ * Note: Proper wakeup detection is currently unimplemented.
+ * When the EC is in display-off or any other non-D0 state, it
+ * does not send events/notifications to the host. Instead it
+ * signals that there are events available via the wakeup IRQ.
+ * This driver is responsible for calling back to the EC to
+ * release these events one-by-one.
+ *
+ * This IRQ should not cause a full system resume by its own.
+ * Instead, events should be handled by their respective subsystem
+ * drivers, which in turn should signal whether a full system
+ * resume should be performed.
+ *
+ * TODO: Send GPIO callback command repeatedly to EC until callback
+ * returns 0x00. Return flag of callback is "has more events".
+ * Each time the command is sent, one event is "released". Once
+ * all events have been released (return = 0x00), the GPIO is
+ * re-armed. Detect wakeup events during this process, go back to
+ * sleep if no wakeup event has been received.
+ */
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be set up.
+ *
+ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
+ * to wake the device from a low power state.
+ *
+ * Note that this IRQ can only be triggered while the EC is in the display-off
+ * state. In this state, events are not sent to the host in the usual way.
+ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
+ * events and these events need to be released one-by-one via the GPIO
+ * callback request, either until there are no events left and the GPIO is
+ * reset, or all at once by transitioning the EC out of the display-off state,
+ * which will also clear the GPIO.
+ *
+ * Not all events, however, should trigger a full system wakeup. Instead the
+ * driver should, if necessary, inspect and forward each event to the
+ * corresponding subsystem, which in turn should decide if the system needs to
+ * be woken up. This logic has not been implemented yet, thus wakeup by this
+ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
+ * example, by the remaining battery percentage changing. Refer to comments in
+ * this function and comments in the corresponding IRQ handler for more
+ * details on how this should be implemented.
+ *
+ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
+ * for functions to transition the EC into and out of the display-off state as
+ * well as more details on it.
+ *
+ * The IRQ is disabled by default and has to be enabled before it can wake up
+ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
+ * should be freed via ssam_irq_free().
+ */
+int ssam_irq_setup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ struct gpio_desc *gpiod;
+ int irq;
+ int status;
+
+ /*
+ * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
+ * However, the GPIO line only gets reset by sending the GPIO callback
+ * command to SAM (or alternatively the display-on notification). As
+ * proper handling for this interrupt is not implemented yet, leaving
+ * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
+ * never gets sent and thus the line never gets reset). To avoid
+ * this, mark the IRQ as TRIGGER_RISING for now, only creating a single
+ * interrupt, and let the SAM resume callback during the controller
+ * resume process clear it.
+ */
+ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+
+ gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+
+ irq = gpiod_to_irq(gpiod);
+ gpiod_put(gpiod);
+
+ if (irq < 0)
+ return irq;
+
+ status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
+ "surface_sam_wakeup", ctrl);
+ if (status)
+ return status;
+
+ ctrl->irq.num = irq;
+ disable_irq(ctrl->irq.num);
+ return 0;
+}
+
+/**
+ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
+ * @ctrl: The controller for which the IRQ should be freed.
+ *
+ * Free the wakeup-GPIO IRQ previously set-up via ssam_irq_setup().
+ */
+void ssam_irq_free(struct ssam_controller *ctrl)
+{
+ free_irq(ctrl->irq.num, ctrl);
+ ctrl->irq.num = -1;
+}
+
+/**
+ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
+ * @ctrl: The controller for which the IRQ should be armed.
+ *
+ * Sets up the IRQ so that it can be used to wake the device. Specifically,
+ * this function enables the irq and then, if the device is allowed to wake up
+ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
+ * corresponding function to disable the IRQ.
+ *
+ * This function is intended to arm the IRQ before entering S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
+{
+ struct device *dev = ssam_controller_device(ctrl);
+ int status;
+
+ enable_irq(ctrl->irq.num);
+ if (device_may_wakeup(dev)) {
+ status = enable_irq_wake(ctrl->irq.num);
+ if (status) {
+ ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
+ disable_irq(ctrl->irq.num);
+ return status;
+ }
+
+ ctrl->irq.wakeup_enabled = true;
+ } else {
+ ctrl->irq.wakeup_enabled = false;
+ }
+
+ return 0;
+}
+
+/**
+ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
+ * @ctrl: The controller for which the IRQ should be disarmed.
+ *
+ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
+ *
+ * This function is intended to disarm the IRQ after exiting S2idle suspend.
+ *
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
+ * be balanced.
+ */
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
+{
+ int status;
+
+ if (ctrl->irq.wakeup_enabled) {
+ status = disable_irq_wake(ctrl->irq.num);
+ if (status)
+ ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
+
+ ctrl->irq.wakeup_enabled = false;
+ }
+ disable_irq(ctrl->irq.num);
+}
diff --git a/drivers/misc/surface_sam/controller.h b/drivers/misc/surface_sam/controller.h
new file mode 100644
index 0000000000000..7f58e2a4b51b2
--- /dev/null
+++ b/drivers/misc/surface_sam/controller.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_CONTROLLER_H
+#define _SSAM_CONTROLLER_H
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "ssh_protocol.h"
+#include "ssh_request_layer.h"
+
+
+/* -- Safe counters. -------------------------------------------------------- */
+
+/**
+ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
+ * @value: The current counter value.
+ */
+struct ssh_seq_counter {
+ u8 value;
+};
+
+/**
+ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
+ * @value: The current counter value.
+ */
+struct ssh_rqid_counter {
+ u16 value;
+};
+
+
+/* -- Event/notification system. -------------------------------------------- */
+
+/**
+ * struct ssam_nf_head - Notifier head for SSAM events.
+ * @srcu: The SRCU struct for synchronization.
+ * @head: Head-pointer for the single-linked list of notifier blocks registered
+ * under this head.
+ */
+struct ssam_nf_head {
+ struct srcu_struct srcu;
+ struct ssam_notifier_block __rcu *head;
+};
+
+/**
+ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
+ * @lock: Lock guarding (de-)registration of notifier blocks. Note: This
+ * lock does not need to be held for notifier calls, only
+ * registration and deregistration.
+ * @refcount: The root of the RB-tree used for reference-counting enabled
+ * events/notifications.
+ * @head:      The list of notifier heads for event/notification callbacks.
+ */
+struct ssam_nf {
+ struct mutex lock;
+ struct rb_root refcount;
+ struct ssam_nf_head head[SSH_NUM_EVENTS];
+};
+
+
+/* -- Event/async request completion system. -------------------------------- */
+
+struct ssam_cplt;
+
+/**
+ * struct ssam_event_item - Struct for event queuing and completion.
+ * @node: The node in the queue.
+ * @rqid: The request ID of the event.
+ * @ops: Instance specific functions.
+ * @ops.free: Callback for freeing this event item.
+ * @event: Actual event data.
+ */
+struct ssam_event_item {
+ struct list_head node;
+ u16 rqid;
+
+ struct {
+ void (*free)(struct ssam_event_item *event);
+ } ops;
+
+ struct ssam_event event; // must be last
+};
+
+/**
+ * struct ssam_event_queue - Queue for completing received events.
+ * @cplt: Reference to the completion system on which this queue is active.
+ * @lock: The lock for any operation on the queue.
+ * @head: The list-head of the queue.
+ * @work: The &struct work_struct performing completion work for this queue.
+ */
+struct ssam_event_queue {
+ struct ssam_cplt *cplt;
+
+ spinlock_t lock;
+ struct list_head head;
+ struct work_struct work;
+};
+
+/**
+ * struct ssam_event_target - Set of queues for a single SSH target ID.
+ * @queue: The array of queues, one queue per event ID.
+ */
+struct ssam_event_target {
+ struct ssam_event_queue queue[SSH_NUM_EVENTS];
+};
+
+/**
+ * struct ssam_cplt - SSAM event/async request completion system.
+ * @dev: The device with which this system is associated. Only used
+ * for logging.
+ * @wq: The &struct workqueue_struct on which all completion work
+ * items are queued.
+ * @event: Event completion management.
+ * @event.target: Array of &struct ssam_event_target, one for each target.
+ * @event.notif: Notifier callbacks and event activation reference counting.
+ */
+struct ssam_cplt {
+ struct device *dev;
+ struct workqueue_struct *wq;
+
+ struct {
+ struct ssam_event_target target[SSH_NUM_TARGETS];
+ struct ssam_nf notif;
+ } event;
+};
+
+
+/* -- Main SSAM device structures. ------------------------------------------ */
+
+/**
+ * enum ssam_controller_state - State values for &struct ssam_controller.
+ * @SSAM_CONTROLLER_UNINITIALIZED:
+ * The controller has not been initialized yet or has been de-initialized.
+ * @SSAM_CONTROLLER_INITIALIZED:
+ * The controller is initialized, but has not been started yet.
+ * @SSAM_CONTROLLER_STARTED:
+ * The controller has been started and is ready to use.
+ * @SSAM_CONTROLLER_STOPPED:
+ * The controller has been stopped.
+ * @SSAM_CONTROLLER_SUSPENDED:
+ * The controller has been suspended.
+ */
+enum ssam_controller_state {
+ SSAM_CONTROLLER_UNINITIALIZED,
+ SSAM_CONTROLLER_INITIALIZED,
+ SSAM_CONTROLLER_STARTED,
+ SSAM_CONTROLLER_STOPPED,
+ SSAM_CONTROLLER_SUSPENDED,
+};
+
+/**
+ * struct ssam_device_caps - Controller device capabilities.
+ * @notif_display: The controller supports display-on/-off notifications.
+ * @notif_d0exit: The controller supports D0-entry/D0-exit notifications
+ */
+struct ssam_device_caps {
+ u32 notif_display:1;
+ u32 notif_d0exit:1;
+};
+
+/**
+ * struct ssam_controller - SSAM controller device.
+ * @kref: Reference count of the controller.
+ * @lock: Main lock for the controller, used to guard state changes.
+ * @state: Controller state.
+ * @rtl: Request transport layer for SSH I/O.
+ * @cplt: Completion system for SSH/SSAM events and asynchronous requests.
+ * @counter: Safe SSH message ID counters.
+ * @counter.seq: Sequence ID counter.
+ * @counter.rqid: Request ID counter.
+ * @irq: Wakeup IRQ resources.
+ * @irq.num: The wakeup IRQ number.
+ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
+ * @caps: The controller device capabilities.
+ */
+struct ssam_controller {
+ struct kref kref;
+
+ struct rw_semaphore lock;
+ enum ssam_controller_state state;
+
+ struct ssh_rtl rtl;
+ struct ssam_cplt cplt;
+
+ struct {
+ struct ssh_seq_counter seq;
+ struct ssh_rqid_counter rqid;
+ } counter;
+
+ struct {
+ int num;
+ bool wakeup_enabled;
+ } irq;
+
+ struct ssam_device_caps caps;
+};
+
+#define to_ssam_controller(ptr, member) \
+ container_of(ptr, struct ssam_controller, member)
+
+#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
+
+
+/**
+ * ssam_controller_receive_buf() - Provide input-data to the controller.
+ * @ctrl: The controller.
+ * @buf: The input buffer.
+ * @n: The number of bytes in the input buffer.
+ *
+ * Provide input data to be evaluated by the controller, which has been
+ * received via the lower-level transport.
+ *
+ * Returns the number of bytes consumed, or, if the packet transport
+ * layer of the controller has been shut down, -ESHUTDOWN.
+ */
+static inline
+int ssam_controller_receive_buf(struct ssam_controller *ctrl,
+ const unsigned char *buf, size_t n)
+{
+ return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
+}
+
+/**
+ * ssam_controller_write_wakeup() - Notify the controller that the underlying
+ * device has space avaliable for data to be written.
+ * @ctrl: The controller.
+ */
+static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
+{
+ ssh_ptl_tx_wakeup(&ctrl->rtl.ptl, true);
+}
+
+
+int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
+int ssam_controller_start(struct ssam_controller *ctrl);
+void ssam_controller_shutdown(struct ssam_controller *ctrl);
+void ssam_controller_destroy(struct ssam_controller *ctrl);
+
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
+
+int ssam_irq_setup(struct ssam_controller *ctrl);
+void ssam_irq_free(struct ssam_controller *ctrl);
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
+
+void ssam_controller_lock(struct ssam_controller *c);
+void ssam_controller_unlock(struct ssam_controller *c);
+
+int ssam_log_firmware_version(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
+
+int ssam_controller_suspend(struct ssam_controller *ctrl);
+int ssam_controller_resume(struct ssam_controller *ctrl);
+
+int ssam_event_item_cache_init(void);
+void ssam_event_item_cache_destroy(void);
+
+#endif /* _SSAM_CONTROLLER_H */
diff --git a/drivers/misc/surface_sam/core.c b/drivers/misc/surface_sam/core.c
new file mode 100644
index 0000000000000..db1019c90b114
--- /dev/null
+++ b/drivers/misc/surface_sam/core.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Surface Serial Hub (SSH) driver for communication with the Surface/System
+ * Aggregator Module.
+ */
+
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/pm.h>
+#include <linux/serdev.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "bus.h"
+#include "controller.h"
+
+#define CREATE_TRACE_POINTS
+#include "ssam_trace.h"
+
+
+/* -- Static controller reference. ------------------------------------------ */
+
+/*
+ * Main controller reference. The corresponding lock must be held while
+ * accessing (reading/writing) the reference.
+ */
+static struct ssam_controller *__ssam_controller;
+static DEFINE_SPINLOCK(__ssam_controller_lock);
+
+/**
+ * ssam_get_controller() - Get reference to SSAM controller.
+ *
+ * Returns a reference to the SSAM controller of the system or %NULL if there
+ * is none, it hasn't been set up yet, or it has already been unregistered.
+ * This function automatically increments the reference count of the
+ * controller, thus the calling party must ensure that ssam_controller_put()
+ * is called when it doesn't need the controller any more.
+ */
+struct ssam_controller *ssam_get_controller(void)
+{
+ struct ssam_controller *ctrl;
+
+ spin_lock(&__ssam_controller_lock);
+
+ ctrl = __ssam_controller;
+ if (!ctrl)
+ goto out;
+
+ if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
+ ctrl = NULL;
+
+out:
+ spin_unlock(&__ssam_controller_lock);
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(ssam_get_controller);
+
+/**
+ * ssam_try_set_controller() - Try to set the main controller reference.
+ * @ctrl: The controller to which the reference should point.
+ *
+ * Set the main controller reference to the given pointer if the reference
+ * hasn't been set already.
+ *
+ * Return: Returns zero on success or %-EBUSY if the reference has already
+ * been set.
+ */
+static int ssam_try_set_controller(struct ssam_controller *ctrl)
+{
+ int status = 0;
+
+ spin_lock(&__ssam_controller_lock);
+ if (!__ssam_controller)
+ __ssam_controller = ctrl;
+ else
+ status = -EBUSY;
+ spin_unlock(&__ssam_controller_lock);
+
+ return status;
+}
+
+/**
+ * ssam_clear_controller() - Remove/clear the main controller reference.
+ *
+ * Clears the main controller reference, i.e. sets it to %NULL. This function
+ * should be called before the controller is shut down.
+ */
+static void ssam_clear_controller(void)
+{
+ spin_lock(&__ssam_controller_lock);
+ __ssam_controller = NULL;
+ spin_unlock(&__ssam_controller_lock);
+}
+
+
+/**
+ * ssam_client_link() - Link an arbitrary client device to the controller.
+ * @c: The controller to link to.
+ * @client: The client device.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the controller device as provider. This function
+ * can be used for non-SSAM devices (or SSAM devices not registered as child
+ * under the controller) to guarantee that the controller is valid for as long
+ * as the driver of the client device is bound, and that proper suspend and
+ * resume ordering is guaranteed.
+ *
+ * The device link does not have to be destructed manually. It is removed
+ * automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns zero on success, %-ENXIO if the controller is not ready or
+ * going to be removed soon, or %-ENOMEM if the device link could not be
+ * created for other reasons.
+ */
+int ssam_client_link(struct ssam_controller *c, struct device *client)
+{
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
+ struct device_link *link;
+ struct device *ctrldev;
+
+ ssam_controller_statelock(c);
+
+ if (READ_ONCE(c->state) != SSAM_CONTROLLER_STARTED) {
+ ssam_controller_stateunlock(c);
+ return -ENXIO;
+ }
+
+ ctrldev = ssam_controller_device(c);
+ if (!ctrldev) {
+ ssam_controller_stateunlock(c);
+ return -ENXIO;
+ }
+
+ link = device_link_add(client, ctrldev, flags);
+ if (!link) {
+ ssam_controller_stateunlock(c);
+ return -ENOMEM;
+ }
+
+ /*
+ * Return -ENXIO if supplier driver is on its way to be removed. In this
+ * case, the controller won't be around for much longer and the device
+ * link is not going to save us any more, as unbinding is already in
+ * progress.
+ */
+ if (link->status == DL_STATE_SUPPLIER_UNBIND) {
+ ssam_controller_stateunlock(c);
+ return -ENXIO;
+ }
+
+ ssam_controller_stateunlock(c);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ssam_client_link);
+
+/**
+ * ssam_client_bind() - Bind an arbitrary client device to the controller.
+ * @client: The client device.
+ * @ctrl: A pointer to where the controller reference should be returned.
+ *
+ * Link an arbitrary client device to the controller by creating a device link
+ * between it as consumer and the main controller device as provider. This
+ * function can be used for non-SSAM devices to guarantee that the controller
+ * returned by this function is valid for as long as the driver of the client
+ * device is bound, and that proper suspend and resume ordering is guaranteed.
+ *
+ * This function does essentially the same as ssam_client_link(), except that
+ * it first fetches the main controller reference, then creates the link, and
+ * finally returns this reference in the @ctrl parameter. Note that this
+ * function does not increment the reference counter of the controller, as,
+ * due to the link, the controller lifetime is assured as long as the driver
+ * of the client device is bound.
+ *
+ * It is not valid to use the controller reference obtained by this method
+ * outside of the driver bound to the client device at the time of calling
+ * this function, without first incrementing the reference count of the
+ * controller via ssam_controller_get(). Even after doing this, care must be
+ * taken that requests are only submitted and notifiers are only
+ * (un-)registered when the controller is active and not suspended. In other
+ * words: The device link only lives as long as the client driver is bound and
+ * any guarantees enforced by this link (e.g. active controller state) can
+ * only be relied upon as long as this link exists and may need to be enforced
+ * in other ways afterwards.
+ *
+ * The created device link does not have to be destructed manually. It is
+ * removed automatically once the driver of the client device unbinds.
+ *
+ * Return: Returns zero on success, %-ENXIO if the controller is not present,
+ * not ready or going to be removed soon, or %-ENOMEM if the device link could
+ * not be created for other reasons.
+ */
+int ssam_client_bind(struct device *client, struct ssam_controller **ctrl)
+{
+ struct ssam_controller *c;
+ int status;
+
+ c = ssam_get_controller();
+ if (!c)
+ return -ENXIO;
+
+ status = ssam_client_link(c, client);
+
+ /*
+ * Note that we can drop our controller reference in both success and
+ * failure cases: On success, we have bound the controller lifetime
+ * inherently to the client driver lifetime, i.e. the controller is
+ * now guaranteed to outlive the client driver. On failure, we're not
+ * going to use the controller any more.
+ */
+ ssam_controller_put(c);
+
+ *ctrl = status == 0 ? c : NULL;
+ return status;
+}
+EXPORT_SYMBOL_GPL(ssam_client_bind);
+
+
+/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
+
+static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
+ size_t n)
+{
+ struct ssam_controller *ctrl;
+
+ ctrl = serdev_device_get_drvdata(dev);
+ return ssam_controller_receive_buf(ctrl, buf, n);
+}
+
+static void ssam_write_wakeup(struct serdev_device *dev)
+{
+ ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
+}
+
+static const struct serdev_device_ops ssam_serdev_ops = {
+ .receive_buf = ssam_receive_buf,
+ .write_wakeup = ssam_write_wakeup,
+};
+
+
+/* -- ACPI based device setup. ---------------------------------------------- */
+
+static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
+ void *ctx)
+{
+ struct serdev_device *serdev = ctx;
+ struct acpi_resource_common_serialbus *serial;
+ struct acpi_resource_uart_serialbus *uart;
+ bool flow_control;
+ int status = 0;
+
+ if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return AE_OK;
+
+ serial = &rsc->data.common_serial_bus;
+ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ return AE_OK;
+
+ uart = &rsc->data.uart_serial_bus;
+
+ // set up serdev device
+ serdev_device_set_baudrate(serdev, uart->default_baud_rate);
+
+ // serdev currently only supports RTSCTS flow control
+ if (uart->flow_control & (~((u8) ACPI_UART_FLOW_CONTROL_HW))) {
+ dev_warn(&serdev->dev, "setup: unsupported flow control (value: 0x%02x)\n",
+ uart->flow_control);
+ }
+
+ // set RTSCTS flow control
+ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
+ serdev_device_set_flow_control(serdev, flow_control);
+
+ // serdev currently only supports EVEN/ODD parity
+ switch (uart->parity) {
+ case ACPI_UART_PARITY_NONE:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ break;
+ case ACPI_UART_PARITY_EVEN:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
+ break;
+ case ACPI_UART_PARITY_ODD:
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
+ break;
+ default:
+ dev_warn(&serdev->dev, "setup: unsupported parity (value: 0x%02x)\n",
+ uart->parity);
+ break;
+ }
+
+ if (status) {
+ dev_err(&serdev->dev, "setup: failed to set parity (value: 0x%02x)\n",
+ uart->parity);
+ return status;
+ }
+
+ return AE_CTRL_TERMINATE; // we've found the resource and are done
+}
+
+static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
+ struct serdev_device *serdev)
+{
+ return acpi_walk_resources(handle, METHOD_NAME__CRS,
+ ssam_serdev_setup_via_acpi_crs, serdev);
+}
+
+
+/* -- Power management. ----------------------------------------------------- */
+
+static void ssam_serial_hub_shutdown(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal display-off and D0-exit, ignore any errors.
+ *
+ * Note: It has not been established yet if this is actually
+ * necessary/useful for shutdown.
+ */
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status)
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status)
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+}
+
+static int ssam_serial_hub_suspend(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Try to signal display-off and D0-exit, enable IRQ wakeup if
+ * specified. Abort on error.
+ *
+ * Note: Signalling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available, signal
+ * it here.
+ */
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status) {
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+ return status;
+ }
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ goto err_notif;
+ }
+
+ status = ssam_irq_arm_for_wakeup(c);
+ if (status)
+ goto err_irq;
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+
+err_irq:
+ ssam_ctrl_notif_d0_entry(c);
+err_notif:
+ ssam_ctrl_notif_display_on(c);
+ return status;
+}
+
+static int ssam_serial_hub_resume(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ WARN_ON(ssam_controller_resume(c));
+
+ /*
+ * Try to disable IRQ wakeup (if specified), signal display-on and
+ * D0-entry. In case of errors, log them and try to restore normal
+ * operation state as far as possible.
+ *
+ * Note: Signalling display-off/display-on should normally be done from
+ * some sort of display state notifier. As that is not available, signal
+ * it here.
+ */
+
+ ssam_irq_disarm_wakeup(c);
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ status = ssam_ctrl_notif_display_on(c);
+ if (status)
+ ssam_err(c, "pm: display-on notification failed: %d\n", status);
+
+ return 0;
+}
+
+static int ssam_serial_hub_freeze(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * During hibernation image creation, we only have to ensure that the
+ * EC doesn't send us any events. This is done via the display-off
+ * and D0-exit notifications. Note that this sets up the wakeup IRQ
+ * on the EC side, however, we have disabled it by default on our side
+ * and won't enable it here.
+ *
+ * See ssam_serial_hub_poweroff() for more details on the hibernation
+ * process.
+ */
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status) {
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+ return status;
+ }
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ ssam_ctrl_notif_display_on(c);
+ return status;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+}
+
+static int ssam_serial_hub_thaw(struct device *dev)
+{
+	struct ssam_controller *c = dev_get_drvdata(dev);
+	int status;
+
+	WARN_ON(ssam_controller_resume(c));
+
+	status = ssam_ctrl_notif_d0_entry(c);
+	if (status) {
+		ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+		// try to restore as much as possible in case of failure
+		ssam_ctrl_notif_display_on(c);
+		return status;
+	}
+
+	status = ssam_ctrl_notif_display_on(c);
+	if (status)
+		ssam_err(c, "pm: display-on notification failed: %d\n", status);
+
+	return status;
+}
+
+static int ssam_serial_hub_poweroff(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * When entering hibernation and powering off the system, the EC, at
+ * least on some models, may disable events. Without us taking care of
+ * that, this leads to events not being enabled/restored when the
+ * system resumes from hibernation, resulting in SAM-HID subsystem devices
+ * (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
+ * gone, etc.
+ *
+ * To avoid these issues, we disable all registered events here (this is
+ * likely not actually required) and restore them during the drivers PM
+ * restore callback.
+ *
+ * Wakeup from the EC interrupt is not supported during hibernation,
+ * so don't arm the IRQ here.
+ */
+
+ status = ssam_notifier_disable_registered(c);
+ if (status) {
+ ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
+ status);
+ return status;
+ }
+
+ status = ssam_ctrl_notif_display_off(c);
+ if (status) {
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
+ goto err_dpnf;
+ }
+
+ status = ssam_ctrl_notif_d0_exit(c);
+ if (status) {
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
+ goto err_d0nf;
+ }
+
+ WARN_ON(ssam_controller_suspend(c));
+ return 0;
+
+err_d0nf:
+ ssam_ctrl_notif_display_on(c);
+err_dpnf:
+ ssam_notifier_restore_registered(c);
+ return status;
+}
+
+static int ssam_serial_hub_restore(struct device *dev)
+{
+ struct ssam_controller *c = dev_get_drvdata(dev);
+ int status;
+
+ /*
+ * Ignore but log errors, try to restore state as much as possible in
+ * case of failures. See ssam_serial_hub_poweroff() for more details on
+ * the hibernation process.
+ */
+
+ WARN_ON(ssam_controller_resume(c));
+
+ status = ssam_ctrl_notif_d0_entry(c);
+ if (status)
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
+
+ status = ssam_ctrl_notif_display_on(c);
+ if (status)
+ ssam_err(c, "pm: display-on notification failed: %d\n", status);
+
+ ssam_notifier_restore_registered(c);
+ return 0;
+}
+
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
+ .suspend = ssam_serial_hub_suspend,
+ .resume = ssam_serial_hub_resume,
+ .freeze = ssam_serial_hub_freeze,
+ .thaw = ssam_serial_hub_thaw,
+ .poweroff = ssam_serial_hub_poweroff,
+ .restore = ssam_serial_hub_restore,
+};
+
+
+/* -- Device/driver setup. -------------------------------------------------- */
+
+static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
+static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
+
+static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
+ { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
+ { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
+ { },
+};
+
+static int ssam_serial_hub_probe(struct serdev_device *serdev)
+{
+ struct ssam_controller *ctrl;
+ acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
+ int status;
+
+ if (gpiod_count(&serdev->dev, NULL) < 0)
+ return -ENODEV;
+
+ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
+ if (status)
+ return status;
+
+ // allocate controller
+ ctrl = kzalloc(sizeof(struct ssam_controller), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ // initialize controller
+ status = ssam_controller_init(ctrl, serdev);
+ if (status)
+ goto err_ctrl_init;
+
+ // set up serdev device
+ serdev_device_set_drvdata(serdev, ctrl);
+ serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
+ status = serdev_device_open(serdev);
+ if (status)
+ goto err_devopen;
+
+ status = ssam_serdev_setup_via_acpi(ssh, serdev);
+ if (ACPI_FAILURE(status))
+ goto err_devinit;
+
+ // start controller
+ status = ssam_controller_start(ctrl);
+ if (status)
+ goto err_devinit;
+
+ // initial SAM requests: log version, notify default/init power states
+ status = ssam_log_firmware_version(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_d0_entry(ctrl);
+ if (status)
+ goto err_initrq;
+
+ status = ssam_ctrl_notif_display_on(ctrl);
+ if (status)
+ goto err_initrq;
+
+ // setup IRQ
+ status = ssam_irq_setup(ctrl);
+ if (status)
+ goto err_initrq;
+
+ // finally, set main controller reference
+ status = ssam_try_set_controller(ctrl);
+ if (WARN_ON(status)) // currently, we're the only provider
+ goto err_initrq;
+
+ /*
+ * TODO: The EC can wake up the system via the associated GPIO interrupt
+ * in multiple situations. One of which is the remaining battery
+ * capacity falling below a certain threshold. Normally, we should
+ * use the device_init_wakeup function, however, the EC also seems
+ * to have other reasons for waking up the system and it seems
+ * that Windows has additional checks whether the system should be
+ * resumed. In short, this causes some spurious unwanted wake-ups.
+ * For now let's thus default power/wakeup to false.
+ */
+ device_set_wakeup_capable(&serdev->dev, true);
+ acpi_walk_dep_device_list(ssh);
+
+ return 0;
+
+err_initrq:
+ ssam_controller_shutdown(ctrl);
+err_devinit:
+ serdev_device_close(serdev);
+err_devopen:
+ ssam_controller_destroy(ctrl);
+ serdev_device_set_drvdata(serdev, NULL);
+err_ctrl_init:
+ kfree(ctrl);
+ return status;
+}
+
+static void ssam_serial_hub_remove(struct serdev_device *serdev)
+{
+ struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
+ int status;
+
+ // clear static reference, so that no one else can get a new one
+ ssam_clear_controller();
+
+ ssam_irq_free(ctrl);
+ ssam_controller_lock(ctrl);
+
+ // remove all client devices
+ ssam_controller_remove_clients(ctrl);
+
+ // act as if suspending to disable events
+ status = ssam_ctrl_notif_display_off(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "display-off notification failed: %d\n",
+ status);
+ }
+
+ status = ssam_ctrl_notif_d0_exit(ctrl);
+ if (status) {
+ dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
+ status);
+ }
+
+ // shut down controller and remove serdev device reference from it
+ ssam_controller_shutdown(ctrl);
+
+ // shut down actual transport
+ serdev_device_wait_until_sent(serdev, 0);
+ serdev_device_close(serdev);
+
+ // drop our controller reference
+ ssam_controller_unlock(ctrl);
+ ssam_controller_put(ctrl);
+
+ device_set_wakeup_capable(&serdev->dev, false);
+ serdev_device_set_drvdata(serdev, NULL);
+}
+
+
+static const struct acpi_device_id ssam_serial_hub_match[] = {
+ { "MSHW0084", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
+
+static struct serdev_device_driver ssam_serial_hub = {
+ .probe = ssam_serial_hub_probe,
+ .remove = ssam_serial_hub_remove,
+ .driver = {
+ .name = "surface_sam_ssh",
+ .acpi_match_table = ssam_serial_hub_match,
+ .pm = &ssam_serial_hub_pm_ops,
+ .shutdown = ssam_serial_hub_shutdown,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+
+/* -- Module setup. --------------------------------------------------------- */
+
+static int __init ssam_core_init(void)
+{
+ int status;
+
+ status = ssam_bus_register();
+ if (status)
+ goto err_bus;
+
+ status = ssh_ctrl_packet_cache_init();
+ if (status)
+ goto err_cpkg;
+
+ status = ssam_event_item_cache_init();
+ if (status)
+ goto err_evitem;
+
+ status = serdev_device_driver_register(&ssam_serial_hub);
+ if (status)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ ssam_event_item_cache_destroy();
+err_evitem:
+ ssh_ctrl_packet_cache_destroy();
+err_cpkg:
+ ssam_bus_unregister();
+err_bus:
+ return status;
+}
+
+static void __exit ssam_core_exit(void)
+{
+ serdev_device_driver_unregister(&ssam_serial_hub);
+ ssam_event_item_cache_destroy();
+ ssh_ctrl_packet_cache_destroy();
+ ssam_bus_unregister();
+}
+
+/*
+ * Ensure that the driver is loaded late due to some issues with the UART
+ * communication. Specifically, we want to ensure that DMA is ready and being
+ * used. Not using DMA can result in spurious communication failures,
+ * especially during boot, which among other things will result in wrong
+ * battery information (via ACPI _BIX) being displayed. Using a late init_call
+ * instead of the normal module_init gives the DMA subsystem time to
+ * initialize and via that results in a more stable communication, avoiding
+ * such failures.
+ */
+late_initcall(ssam_core_init);
+module_exit(ssam_core_exit);
+
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
+MODULE_DESCRIPTION("Surface Serial Hub Driver for 5th Generation Surface Devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/surface_sam/ssam_trace.h b/drivers/misc/surface_sam/ssam_trace.h
new file mode 100644
index 0000000000000..7aa05c81ad427
--- /dev/null
+++ b/drivers/misc/surface_sam/ssam_trace.h
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ssam
+
+#if !defined(_SURFACE_SAM_SSH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SURFACE_SAM_SSH_TRACE_H
+
+#include <linux/surface_aggregator_module.h>
+#include <linux/tracepoint.h>
+
+
+/*
+ * Export the enum/constant values used in the trace format strings below so
+ * that user-space tools (e.g. trace-cmd, perf) can resolve them when parsing
+ * the event format.
+ */
+
+/* SSH frame types. */
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
+
+/* Packet state flag bits. */
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
+
+/* Packet type flag bits. */
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
+
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
+
+/* Request state and type flag bits. */
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
+
+/* SSH target categories. */
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
+
+
+/* Length of the pointer-UID string, including the terminating NUL. */
+#define SSAM_PTR_UID_LEN		9
+/* Sentinels reported when a field is not present on the traced object. */
+#define SSAM_U8_FIELD_NOT_APPLICABLE	((u16)-1)
+#define SSAM_SEQ_NOT_APPLICABLE		((u16)-1)
+#define SSAM_RQID_NOT_APPLICABLE	((u32)-1)
+#define SSAM_SSH_TC_NOT_APPLICABLE	0
+
+
+#ifndef _SURFACE_SAM_SSH_TRACE_HELPERS
+#define _SURFACE_SAM_SSH_TRACE_HELPERS
+
+/**
+ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
+ * @ptr: The pointer to convert.
+ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
+ *
+ * Converts the given pointer into a UID string that is safe to be shared
+ * with userspace and logs, i.e. doesn't give away the real memory location.
+ *
+ * Only the last SSAM_PTR_UID_LEN - 1 characters of the formatted pointer
+ * (plus the terminating NUL written by snprintf()) are copied out.
+ */
+static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
+{
+	char buf[2 * sizeof(void *) + 1];
+
+	snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
+	memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
+	       SSAM_PTR_UID_LEN);
+}
+
+/**
+ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's sequence ID (SEQ) field if present, or
+ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
+ */
+static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
+{
+	if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
+		return SSAM_SEQ_NOT_APPLICABLE;
+
+	return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+/**
+ * ssam_trace_get_request_id() - Read the packet's request ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request ID (RQID) field if the packet
+ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
+ * (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
+{
+	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+		return SSAM_RQID_NOT_APPLICABLE;
+
+	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
+}
+
+/**
+ * ssam_trace_get_request_tc() - Read the packet's request target category.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request target category (TC) field if the
+ * packet represents a request with command data, or
+ * %SSAM_SSH_TC_NOT_APPLICABLE if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
+{
+	if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+		return SSAM_SSH_TC_NOT_APPLICABLE;
+
+	return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
+}
+
+#endif /* _SURFACE_SAM_SSH_TRACE_HELPERS */
+
+/**
+ * ssam_trace_get_command_field_u8() - Read a u8 field of a command payload.
+ * @packet: The packet to read from.
+ * @field: The name of the &struct ssh_command field to read.
+ *
+ * Evaluates to the requested field if the packet carries a full command
+ * message, or zero otherwise. NOTE(review): zero, not
+ * %SSAM_U8_FIELD_NOT_APPLICABLE, is yielded in the "not applicable" case,
+ * so ssam_show_generic_u8_field() will print "0" rather than "N/A" --
+ * confirm intended.
+ *
+ * Fixed to dereference the @packet parameter instead of a free-standing
+ * identifier 'p', which only worked because every call site happened to
+ * name its local variable 'p'.
+ */
+#define ssam_trace_get_command_field_u8(packet, field) \
+	((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
+	 ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
+
+/* Show a u8 field, mapping the "not applicable" sentinel to "N/A". */
+#define ssam_show_generic_u8_field(value) \
+	__print_symbolic(value, \
+		{ SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
+	)
+
+
+/* Decode an SSH frame type value into a human-readable string. */
+#define ssam_show_frame_type(ty) \
+	__print_symbolic(ty, \
+		{ SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
+		{ SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
+		{ SSH_FRAME_TYPE_ACK, "ACK" }, \
+		{ SSH_FRAME_TYPE_NAK, "NAK" } \
+	)
+
+/*
+ * Decode the type bits of a packet state/flags word. Fixed to use the
+ * macro parameter instead of a free-standing 'flags' identifier, so the
+ * macro no longer depends on the caller's local variable names.
+ */
+#define ssam_show_packet_type(type) \
+	__print_flags((type) & SSH_PACKET_FLAGS_TY_MASK, "", \
+		{ BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
+		{ BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
+		{ BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
+	)
+
+/* Decode the state bits of a packet state/flags word (same fix as above). */
+#define ssam_show_packet_state(state) \
+	__print_flags((state) & SSH_PACKET_FLAGS_SF_MASK, "", \
+		{ BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
+		{ BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
+		{ BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
+		{ BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
+		{ BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
+		{ BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
+		{ BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
+		{ BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
+	)
+
+/* Show a packet SEQ value, mapping the "not applicable" sentinel to "N/A". */
+#define ssam_show_packet_seq(seq) \
+	__print_symbolic(seq, \
+		{ SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
+	)
+
+
+/* Decode the type bits of a request state/flags word. */
+#define ssam_show_request_type(flags) \
+	__print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "", \
+		{ BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
+		{ BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
+	)
+
+/* Decode the state bits of a request state/flags word. */
+#define ssam_show_request_state(flags) \
+	__print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "", \
+		{ BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
+		{ BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
+		{ BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
+		{ BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
+		{ BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
+		{ BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
+		{ BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
+		{ BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
+	)
+
+/* Show a request ID, mapping the "not applicable" sentinel to "N/A". */
+#define ssam_show_request_id(rqid) \
+	__print_symbolic(rqid, \
+		{ SSAM_RQID_NOT_APPLICABLE, "N/A" } \
+	)
+
+/* Decode an SSH target category value into its mnemonic. */
+#define ssam_show_ssh_tc(rqid) \
+	__print_symbolic(rqid, \
+		{ SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
+		{ SSAM_SSH_TC_SAM, "SAM" }, \
+		{ SSAM_SSH_TC_BAT, "BAT" }, \
+		{ SSAM_SSH_TC_TMP, "TMP" }, \
+		{ SSAM_SSH_TC_PMC, "PMC" }, \
+		{ SSAM_SSH_TC_FAN, "FAN" }, \
+		{ SSAM_SSH_TC_PoM, "PoM" }, \
+		{ SSAM_SSH_TC_DBG, "DBG" }, \
+		{ SSAM_SSH_TC_KBD, "KBD" }, \
+		{ SSAM_SSH_TC_FWU, "FWU" }, \
+		{ SSAM_SSH_TC_UNI, "UNI" }, \
+		{ SSAM_SSH_TC_LPC, "LPC" }, \
+		{ SSAM_SSH_TC_TCL, "TCL" }, \
+		{ SSAM_SSH_TC_SFL, "SFL" }, \
+		{ SSAM_SSH_TC_KIP, "KIP" }, \
+		{ SSAM_SSH_TC_EXT, "EXT" }, \
+		{ SSAM_SSH_TC_BLD, "BLD" }, \
+		{ SSAM_SSH_TC_BAS, "BAS" }, \
+		{ SSAM_SSH_TC_SEN, "SEN" }, \
+		{ SSAM_SSH_TC_SRQ, "SRQ" }, \
+		{ SSAM_SSH_TC_MCU, "MCU" }, \
+		{ SSAM_SSH_TC_HID, "HID" }, \
+		{ SSAM_SSH_TC_TCH, "TCH" }, \
+		{ SSAM_SSH_TC_BKL, "BKL" }, \
+		{ SSAM_SSH_TC_TAM, "TAM" }, \
+		{ SSAM_SSH_TC_ACC, "ACC" }, \
+		{ SSAM_SSH_TC_UFI, "UFI" }, \
+		{ SSAM_SSH_TC_USC, "USC" }, \
+		{ SSAM_SSH_TC_PEN, "PEN" }, \
+		{ SSAM_SSH_TC_VID, "VID" }, \
+		{ SSAM_SSH_TC_AUD, "AUD" }, \
+		{ SSAM_SSH_TC_SMC, "SMC" }, \
+		{ SSAM_SSH_TC_KPD, "KPD" }, \
+		{ SSAM_SSH_TC_REG, "REG" } \
+	)
+
+
+/* Trace a decoded SSH frame header: type, SEQ, and payload length. */
+DECLARE_EVENT_CLASS(ssam_frame_class,
+	TP_PROTO(const struct ssh_frame *frame),
+
+	TP_ARGS(frame),
+
+	TP_STRUCT__entry(
+		__field(u8, type)
+		__field(u8, seq)
+		__field(u16, len)
+	),
+
+	TP_fast_assign(
+		__entry->type = frame->type;
+		__entry->seq = frame->seq;
+		__entry->len = get_unaligned_le16(&frame->len);
+	),
+
+	TP_printk("ty=%s, seq=0x%02x, len=%u",
+		ssam_show_frame_type(__entry->type),
+		__entry->seq,
+		__entry->len
+	)
+);
+
+/* Define a frame event of the given name, based on ssam_frame_class. */
+#define DEFINE_SSAM_FRAME_EVENT(name) \
+	DEFINE_EVENT(ssam_frame_class, ssam_##name, \
+		TP_PROTO(const struct ssh_frame *frame), \
+		TP_ARGS(frame) \
+	)
+
+
+/* Trace a decoded SSH command header together with its payload length. */
+DECLARE_EVENT_CLASS(ssam_command_class,
+	TP_PROTO(const struct ssh_command *cmd, u16 len),
+
+	TP_ARGS(cmd, len),
+
+	TP_STRUCT__entry(
+		__field(u16, rqid)
+		__field(u16, len)
+		__field(u8, tc)
+		__field(u8, cid)
+		__field(u8, iid)
+	),
+
+	TP_fast_assign(
+		__entry->rqid = get_unaligned_le16(&cmd->rqid);
+		__entry->tc = cmd->tc;
+		__entry->cid = cmd->cid;
+		__entry->iid = cmd->iid;
+		__entry->len = len;
+	),
+
+	TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u",
+		__entry->rqid,
+		ssam_show_ssh_tc(__entry->tc),
+		__entry->cid,
+		__entry->iid,
+		__entry->len
+	)
+);
+
+/* Define a command event of the given name, based on ssam_command_class. */
+#define DEFINE_SSAM_COMMAND_EVENT(name) \
+	DEFINE_EVENT(ssam_command_class, ssam_##name, \
+		TP_PROTO(const struct ssh_command *cmd, u16 len), \
+		TP_ARGS(cmd, len) \
+	)
+
+
+/* Trace a packet: pointer UID, priority, length, state bits, and SEQ. */
+DECLARE_EVENT_CLASS(ssam_packet_class,
+	TP_PROTO(const struct ssh_packet *packet),
+
+	TP_ARGS(packet),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+		__field(u8, priority)
+		__field(u16, length)
+		__field(unsigned long, state)
+		__field(u16, seq)
+	),
+
+	TP_fast_assign(
+		ssam_trace_ptr_uid(packet, __entry->uid);
+		__entry->priority = READ_ONCE(packet->priority);
+		__entry->length = packet->data.len;
+		__entry->state = READ_ONCE(packet->state);
+		__entry->seq = ssam_trace_get_packet_seq(packet);
+	),
+
+	TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s",
+		__entry->uid,
+		ssam_show_packet_seq(__entry->seq),
+		ssam_show_packet_type(__entry->state),
+		__entry->priority,
+		__entry->length,
+		ssam_show_packet_state(__entry->state)
+	)
+);
+
+/* Define a packet event of the given name, based on ssam_packet_class. */
+#define DEFINE_SSAM_PACKET_EVENT(name) \
+	DEFINE_EVENT(ssam_packet_class, ssam_##name, \
+		TP_PROTO(const struct ssh_packet *packet), \
+		TP_ARGS(packet) \
+	)
+
+
+/* Like ssam_packet_class, but additionally records a status/error code. */
+DECLARE_EVENT_CLASS(ssam_packet_status_class,
+	TP_PROTO(const struct ssh_packet *packet, int status),
+
+	TP_ARGS(packet, status),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+		__field(u8, priority)
+		__field(u16, length)
+		__field(unsigned long, state)
+		__field(u16, seq)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		ssam_trace_ptr_uid(packet, __entry->uid);
+		__entry->priority = READ_ONCE(packet->priority);
+		__entry->length = packet->data.len;
+		__entry->state = READ_ONCE(packet->state);
+		__entry->seq = ssam_trace_get_packet_seq(packet);
+		__entry->status = status;
+	),
+
+	TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d",
+		__entry->uid,
+		ssam_show_packet_seq(__entry->seq),
+		ssam_show_packet_type(__entry->state),
+		__entry->priority,
+		__entry->length,
+		ssam_show_packet_state(__entry->state),
+		__entry->status
+	)
+);
+
+/* Define a packet+status event based on ssam_packet_status_class. */
+#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
+	DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
+		TP_PROTO(const struct ssh_packet *packet, int status), \
+		TP_ARGS(packet, status) \
+	)
+
+
+/* Trace a request; the UID is derived from the underlying packet. */
+DECLARE_EVENT_CLASS(ssam_request_class,
+	TP_PROTO(const struct ssh_request *request),
+
+	TP_ARGS(request),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+		__field(unsigned long, state)
+		__field(u32, rqid)
+		__field(u8, tc)
+		__field(u16, cid)
+		__field(u16, iid)
+	),
+
+	TP_fast_assign(
+		const struct ssh_packet *p = &request->packet;
+
+		// use packet for UID so we can match requests to packets
+		ssam_trace_ptr_uid(p, __entry->uid);
+		__entry->state = READ_ONCE(request->state);
+		__entry->rqid = ssam_trace_get_request_id(p);
+		__entry->tc = ssam_trace_get_request_tc(p);
+		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
+		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
+		// NOTE(review): the getter yields 0 (not the N/A sentinel) for
+		// packets without command data -- confirm "N/A" display intended
+	),
+
+	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
+		__entry->uid,
+		ssam_show_request_id(__entry->rqid),
+		ssam_show_request_type(__entry->state),
+		ssam_show_request_state(__entry->state),
+		ssam_show_ssh_tc(__entry->tc),
+		ssam_show_generic_u8_field(__entry->cid),
+		ssam_show_generic_u8_field(__entry->iid)
+	)
+);
+
+/* Define a request event of the given name, based on ssam_request_class. */
+#define DEFINE_SSAM_REQUEST_EVENT(name) \
+	DEFINE_EVENT(ssam_request_class, ssam_##name, \
+		TP_PROTO(const struct ssh_request *request), \
+		TP_ARGS(request) \
+	)
+
+
+/* Like ssam_request_class, but additionally records a status/error code. */
+DECLARE_EVENT_CLASS(ssam_request_status_class,
+	TP_PROTO(const struct ssh_request *request, int status),
+
+	TP_ARGS(request, status),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+		__field(unsigned long, state)
+		__field(u32, rqid)
+		__field(u8, tc)
+		__field(u16, cid)
+		__field(u16, iid)
+		__field(int, status)
+	),
+
+	TP_fast_assign(
+		const struct ssh_packet *p = &request->packet;
+
+		// use packet for UID so we can match requests to packets
+		ssam_trace_ptr_uid(p, __entry->uid);
+		__entry->state = READ_ONCE(request->state);
+		__entry->rqid = ssam_trace_get_request_id(p);
+		__entry->tc = ssam_trace_get_request_tc(p);
+		__entry->cid = ssam_trace_get_command_field_u8(p, cid);
+		__entry->iid = ssam_trace_get_command_field_u8(p, iid);
+		__entry->status = status;
+	),
+
+	TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
+		__entry->uid,
+		ssam_show_request_id(__entry->rqid),
+		ssam_show_request_type(__entry->state),
+		ssam_show_request_state(__entry->state),
+		ssam_show_ssh_tc(__entry->tc),
+		ssam_show_generic_u8_field(__entry->cid),
+		ssam_show_generic_u8_field(__entry->iid),
+		__entry->status
+	)
+);
+
+/* Define a request+status event based on ssam_request_status_class. */
+#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
+	DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
+		TP_PROTO(const struct ssh_request *request, int status),\
+		TP_ARGS(request, status) \
+	)
+
+
+/* Trace an allocation: pointer UID plus the allocated size. */
+DECLARE_EVENT_CLASS(ssam_alloc_class,
+	TP_PROTO(void *ptr, size_t len),
+
+	TP_ARGS(ptr, len),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+		__field(size_t, len)
+	),
+
+	TP_fast_assign(
+		ssam_trace_ptr_uid(ptr, __entry->uid);
+		__entry->len = len;
+	),
+
+	TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
+);
+
+/* Define an allocation event of the given name, based on ssam_alloc_class. */
+#define DEFINE_SSAM_ALLOC_EVENT(name) \
+	DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
+		TP_PROTO(void *ptr, size_t len), \
+		TP_ARGS(ptr, len) \
+	)
+
+
+/*
+ * Trace a deallocation via its pointer UID. The previously declared but
+ * never assigned 'len' field has been removed: it was not set in
+ * TP_fast_assign() and not printed, so every record carried uninitialized
+ * bytes in the trace buffer.
+ */
+DECLARE_EVENT_CLASS(ssam_free_class,
+	TP_PROTO(void *ptr),
+
+	TP_ARGS(ptr),
+
+	TP_STRUCT__entry(
+		__array(char, uid, SSAM_PTR_UID_LEN)
+	),
+
+	TP_fast_assign(
+		ssam_trace_ptr_uid(ptr, __entry->uid);
+	),
+
+	TP_printk("uid=%s", __entry->uid)
+);
+
+/* Define a free event of the given name, based on ssam_free_class. */
+#define DEFINE_SSAM_FREE_EVENT(name) \
+	DEFINE_EVENT(ssam_free_class, ssam_##name, \
+		TP_PROTO(void *ptr), \
+		TP_ARGS(ptr) \
+	)
+
+
+/* Trace a generic named unsigned-int property (e.g. timeout reap counts). */
+DECLARE_EVENT_CLASS(ssam_generic_uint_class,
+	TP_PROTO(const char *property, unsigned int value),
+
+	TP_ARGS(property, value),
+
+	TP_STRUCT__entry(
+		__string(property, property)
+		__field(unsigned int, value)
+	),
+
+	TP_fast_assign(
+		__assign_str(property, property);
+		__entry->value = value;
+	),
+
+	TP_printk("%s=%u", __get_str(property), __entry->value)
+);
+
+/* Define a named-uint event based on ssam_generic_uint_class. */
+#define DEFINE_SSAM_GENERIC_UINT_EVENT(name) \
+	DEFINE_EVENT(ssam_generic_uint_class, ssam_##name, \
+		TP_PROTO(const char *property, unsigned int value), \
+		TP_ARGS(property, value) \
+	)
+
+
+/* Receiver: raw frames and decoded responses/events. */
+DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
+DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
+
+/* Packet-layer life cycle. */
+DEFINE_SSAM_PACKET_EVENT(packet_release);
+DEFINE_SSAM_PACKET_EVENT(packet_submit);
+DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
+DEFINE_SSAM_PACKET_EVENT(packet_timeout);
+DEFINE_SSAM_PACKET_EVENT(packet_cancel);
+DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
+DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap);
+
+/* Request-layer life cycle. */
+DEFINE_SSAM_REQUEST_EVENT(request_submit);
+DEFINE_SSAM_REQUEST_EVENT(request_timeout);
+DEFINE_SSAM_REQUEST_EVENT(request_cancel);
+DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
+DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap);
+
+/* Error-injection (ei_*) trace points. */
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
+DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
+DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
+DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn);
+DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
+DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
+
+/* Slab-cache allocations/frees for control packets and event items. */
+DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
+DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
+
+DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
+DEFINE_SSAM_FREE_EVENT(event_item_free);
+#endif /* _SURFACE_SAM_SSH_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE ssam_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/misc/surface_sam/ssh_msgb.h b/drivers/misc/surface_sam/ssh_msgb.h
new file mode 100644
index 0000000000000..ae8c3886722f6
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_msgb.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_SSH_MSGB_H
+#define _SSAM_SSH_MSGB_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator_module.h>
+#include "ssh_protocol.h"
+
+
+/**
+ * struct msgbuf - Buffer struct to construct SSH messages.
+ * @begin: Pointer to the beginning of the allocated buffer space.
+ * @end: Pointer to the end (one past last element) of the allocated buffer
+ * space.
+ * @ptr: Pointer to the first free element in the buffer.
+ */
+struct msgbuf {
+	u8 *begin;
+	u8 *end;
+	u8 *ptr;
+};
+
+/**
+ * msgb_init() - Initialize the given message buffer struct.
+ * @msgb: The buffer struct to initialize
+ * @ptr: Pointer to the underlying memory by which the buffer will be backed.
+ * @cap: Size of the underlying memory.
+ *
+ * Initialize the given message buffer struct using the provided memory as
+ * backing. The struct does not take ownership of the memory.
+ */
+static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
+{
+	msgb->begin = ptr;
+	msgb->end = ptr + cap;
+	msgb->ptr = ptr;
+}
+
+/**
+ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
+ * @msgb: The message buffer.
+ *
+ * Return: Returns the number of bytes already written to the buffer.
+ */
+static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
+{
+	return msgb->ptr - msgb->begin;
+}
+
+/**
+ * msgb_push_u16() - Push a u16 value to the buffer.
+ * @msgb: The message buffer.
+ * @value: The value to push to the buffer.
+ *
+ * The value is stored in little-endian byte order. If the buffer has
+ * insufficient space left, a warning is emitted and the value is dropped.
+ */
+static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
+{
+	if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
+		return;
+
+	put_unaligned_le16(value, msgb->ptr);
+	msgb->ptr += sizeof(u16);
+}
+
+/**
+ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
+ * @msgb: The message buffer.
+ */
+static inline void msgb_push_syn(struct msgbuf *msgb)
+{
+	msgb_push_u16(msgb, SSH_MSG_SYN);
+}
+
+/**
+ * msgb_push_buf() - Push raw data to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data to push to the buffer.
+ * @len: The length of the data to push to the buffer.
+ *
+ * Now bounds-checked like the other push helpers (msgb_push_u16(),
+ * msgb_push_frame()): if the data does not fit into the remaining buffer
+ * space, a warning is emitted and nothing is copied, instead of silently
+ * writing past the end of the backing memory.
+ */
+static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+	if (WARN_ON(msgb->ptr + len > msgb->end))
+		return;
+
+	msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
+}
+
+/**
+ * msgb_push_crc() - Compute CRC and push it to the buffer.
+ * @msgb: The message buffer.
+ * @buf: The data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ *
+ * The CRC is appended in little-endian byte order; bounds are checked by
+ * msgb_push_u16().
+ */
+static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
+{
+	msgb_push_u16(msgb, ssh_crc(buf, len));
+}
+
+/**
+ * msgb_push_frame() - Push a SSH message frame header to the buffer.
+ * @msgb: The message buffer
+ * @ty: The type of the frame.
+ * @len: The length of the payload of the frame.
+ * @seq: The sequence ID of the frame/packet.
+ *
+ * Appends the frame header, immediately followed by the CRC computed over
+ * that header. Drops the frame with a warning if it does not fit.
+ */
+static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
+{
+	struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr;
+	const u8 *const begin = msgb->ptr;
+
+	if (WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end))
+		return;
+
+	frame->type = ty;
+	put_unaligned_le16(len, &frame->len);
+	frame->seq = seq;
+
+	msgb->ptr += sizeof(*frame);
+	msgb_push_crc(msgb, begin, msgb->ptr - begin);
+}
+
+/**
+ * msgb_push_ack() - Push a SSH ACK frame to the buffer.
+ * @msgb: The message buffer
+ * @seq: The sequence ID of the frame/packet to be ACKed.
+ */
+static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
+{
+	// SYN
+	msgb_push_syn(msgb);
+
+	// ACK-type frame + CRC
+	msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
+
+	// payload CRC (ACK-type frames do not have a payload)
+	msgb_push_crc(msgb, msgb->ptr, 0);
+}
+
+/**
+ * msgb_push_nak() - Push a SSH NAK frame to the buffer.
+ * @msgb: The message buffer
+ */
+static inline void msgb_push_nak(struct msgbuf *msgb)
+{
+	// SYN
+	msgb_push_syn(msgb);
+
+	// NAK-type frame + CRC
+	msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
+
+	// payload CRC (NAK-type frames do not have a payload)
+	msgb_push_crc(msgb, msgb->ptr, 0);
+}
+
+/**
+ * msgb_push_cmd() - Push a SSH command frame with payload to the buffer.
+ * @msgb: The message buffer.
+ * @seq: The sequence ID (SEQ) of the frame/packet.
+ * @rqid: The request ID (RQID) of the request contained in the frame.
+ * @rqst: The request to wrap in the frame.
+ *
+ * The frame is always emitted as a sequenced data frame. NOTE(review):
+ * only the command header is explicitly bounds-checked here; the trailing
+ * request payload is assumed to fit via the message size computed by the
+ * caller -- TODO confirm.
+ */
+static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
+				 const struct ssam_request *rqst)
+{
+	struct ssh_command *cmd;
+	const u8 *cmd_begin;
+	const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
+
+	// SYN
+	msgb_push_syn(msgb);
+
+	// command frame + crc
+	msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->length, seq);
+
+	// frame payload: command struct + payload
+	if (WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end))
+		return;
+
+	cmd_begin = msgb->ptr;
+	cmd = (struct ssh_command *)msgb->ptr;
+
+	cmd->type = SSH_PLD_TYPE_CMD;
+	cmd->tc = rqst->target_category;
+	cmd->tid_out = rqst->target_id;
+	cmd->tid_in = 0x00;	// incoming target ID fixed to zero -- TODO confirm semantics
+	cmd->iid = rqst->instance_id;
+	put_unaligned_le16(rqid, &cmd->rqid);
+	cmd->cid = rqst->command_id;
+
+	msgb->ptr += sizeof(*cmd);
+
+	// command payload
+	msgb_push_buf(msgb, rqst->payload, rqst->length);
+
+	// crc for command struct + payload
+	msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin);
+}
+
+#endif /* _SSAM_SSH_MSGB_H */
diff --git a/drivers/misc/surface_sam/ssh_packet_layer.c b/drivers/misc/surface_sam/ssh_packet_layer.c
new file mode 100644
index 0000000000000..1b7d59ec4f433
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_packet_layer.c
@@ -0,0 +1,1780 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/error-injection.h>
+#include <linux/jiffies.h>
+#include <linux/kfifo.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "ssh_packet_layer.h"
+#include "ssh_protocol.h"
+
+#include "ssam_trace.h"
+
+
+/*
+ * To simplify reasoning about the code below, we define a few concepts. The
+ * system below is similar to a state-machine for packets, however, there are
+ * too many states to explicitly write them down. To (somewhat) manage the
+ * states and packets we rely on flags, reference counting, and some simple
+ * concepts. State transitions are triggered by actions.
+ *
+ * >> Actions <<
+ *
+ * - submit
+ * - transmission start (process next item in queue)
+ * - transmission finished (guaranteed to never be parallel to transmission
+ * start)
+ * - ACK received
+ * - NAK received (this is equivalent to issuing re-submit for all pending
+ * packets)
+ * - timeout (this is equivalent to re-issuing a submit or canceling)
+ * - cancel (non-pending and pending)
+ *
+ * >> Data Structures, Packet Ownership, General Overview <<
+ *
+ * The code below employs two main data structures: The packet queue, containing
+ * all packets scheduled for transmission, and the set of pending packets,
+ * containing all packets awaiting an ACK.
+ *
+ * Shared ownership of a packet is controlled via reference counting. Inside the
+ * transmission system are a total of five packet owners:
+ *
+ * - the packet queue,
+ * - the pending set,
+ * - the transmitter thread,
+ * - the receiver thread (via ACKing), and
+ * - the timeout work item.
+ *
+ * Normal operation is as follows: The initial reference of the packet is
+ * obtained by submitting the packet and queueing it. The transmitter thread
+ * takes packets from the queue. By doing this, it does not increment the
+ * refcount but takes over the reference (removing it from the queue).
+ * If the packet is sequenced (i.e. needs to be ACKed by the client), the
+ * transmitter thread sets up the timeout and adds the packet to the pending set
+ * before starting to transmit it. As the timeout is handled by a reaper task,
+ * no additional reference for it is needed. After the transmit is done, the
+ * reference held by the transmitter thread is dropped. If the packet is
+ * unsequenced (i.e. does not need an ACK), the packet is completed by the
+ * transmitter thread before dropping that reference.
+ *
+ * On receipt of an ACK, the receiver thread removes and obtains the reference
+ * to the packet from the pending set. On success, the receiver thread will then
+ * complete the packet and drop its reference.
+ *
+ * On error, the completion callback is immediately run on the thread on which
+ * the error was detected.
+ *
+ * To ensure that a packet eventually leaves the system it is marked as "locked"
+ * directly before it is going to be completed or when it is canceled. Marking a
+ * packet as "locked" has the effect that passing and creating new references
+ * of the packet will be blocked. This means that the packet cannot be added
+ * to the queue, the pending set, and the timeout, or be picked up by the
+ * transmitter thread or receiver thread. To remove a packet from the system it
+ * has to be marked as locked and subsequently all references from the data
+ * structures (queue, pending) have to be removed. References held by threads
+ * will eventually be dropped automatically as their execution progresses.
+ *
+ * Note that the packet completion callback is, in case of success and for a
+ * sequenced packet, guaranteed to run on the receiver thread, thus providing a
+ * way to reliably identify responses to the packet. The packet completion
+ * callback is only run once and it does not indicate that the packet has fully
+ * left the system. In case of re-submission (and with somewhat unlikely
+ * timing), it may be possible that the packet is being re-transmitted while the
+ * completion callback runs. Completion will occur both on success and internal
+ * error, as well as when the packet is canceled.
+ *
+ * >> Flags <<
+ *
+ * Flags are used to indicate the state and progression of a packet. Some flags
+ * have stricter guarantees than others:
+ *
+ * - locked
+ * Indicates if the packet is locked. If the packet is locked, passing and/or
+ * creating additional references to the packet is forbidden. The packet thus
+ * may not be queued, dequeued, or removed or added to the pending set. Note
+ * that the packet state flags may still change (e.g. it may be marked as
+ * ACKed, transmitted, ...).
+ *
+ * - completed
+ * Indicates if the packet completion has been run or is about to be run. This
+ * flag is used to ensure that the packet completion callback is only run
+ * once.
+ *
+ * - queued
+ * Indicates if a packet is present in the submission queue or not. This flag
+ * must only be modified with the queue lock held, and must be coherent with
+ * the presence of the packet in the queue.
+ *
+ * - pending
+ * Indicates if a packet is present in the set of pending packets or not.
+ * This flag must only be modified with the pending lock held, and must be
+ * coherent with the presence of the packet in the pending set.
+ *
+ * - transmitting
+ * Indicates if the packet is currently transmitting. In case of
+ * re-transmissions, it is only safe to wait on the "transmitted" completion
+ * after this flag has been set. The completion will be set both in success
+ * and error case.
+ *
+ * - transmitted
+ * Indicates if the packet has been transmitted. This flag is not cleared by
+ * the system, thus it indicates the first transmission only.
+ *
+ * - acked
+ * Indicates if the packet has been acknowledged by the client. There are no
+ * other guarantees given. For example, the packet may still be canceled
+ * and/or the completion may be triggered with an error even though this bit is
+ * set. Rely on the status provided by completion instead.
+ *
+ * - canceled
+ * Indicates if the packet has been canceled from the outside. There are no
+ * other guarantees given. Specifically, the packet may be completed by
+ * another part of the system before the cancellation attempts to complete it.
+ *
+ * >> General Notes <<
+ *
+ * To avoid deadlocks, if both queue and pending locks are required, the pending
+ * lock must be acquired before the queue lock.
+ */
+
+/*
+ * Maximum number of transmission attempts per sequenced packet in case of
+ * time-outs. Must be smaller than 16.
+ */
+#define SSH_PTL_MAX_PACKET_TRIES 3
+
+/*
+ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
+ * time-frame after starting transmission, the packet will be re-submitted.
+ */
+#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
+
+/*
+ * Maximum time resolution for timeouts. Should be larger than one jiffy to
+ * avoid direct re-scheduling of reaper work_struct.
+ */
+#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
+
+/*
+ * Maximum number of sequenced packets concurrently waiting for an ACK.
+ * Packets marked as blocking will not be transmitted while this limit is
+ * reached.
+ */
+#define SSH_PTL_MAX_PENDING 1
+
+#define SSH_PTL_RX_BUF_LEN 4096
+
+#define SSH_PTL_RX_FIFO_LEN 4096
+
+
+#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
+
+/**
+ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
+ *
+ * Useful to test detection and handling of automated re-transmits by the EC.
+ * Specifically of packets that the EC considers not-ACKed but the driver
+ * already considers ACKed (due to a dropped ACK). In this case, the EC
+ * re-transmits the packet-to-be-ACKed and the driver should detect it as
+ * duplicate/already handled. Note that the driver should still send an ACK
+ * for the re-transmitted packet.
+ */
+static noinline bool ssh_ptl_should_drop_ack_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
+ *
+ * Useful to test/force automated (timeout-based) re-transmit by the EC.
+ * Specifically, packets that have not reached the driver completely/with valid
+ * checksums. Only useful in combination with reception of (injected) bad data.
+ */
+static noinline bool ssh_ptl_should_drop_nak_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
+
+/**
+ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
+ * data packet.
+ *
+ * Useful to test re-transmit timeout of the driver. If the data packet has not
+ * been ACKed after a certain time, the driver should re-transmit the packet up
+ * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
+ */
+static noinline bool ssh_ptl_should_drop_dsq_packet(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
+
+/**
+ * ssh_ptl_should_fail_write() - Error injection hook to make
+ * serdev_device_write() fail.
+ *
+ * Hook to simulate errors in serdev_device_write when transmitting packets.
+ */
+static noinline int ssh_ptl_should_fail_write(void)
+{
+ return 0;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
+
+/**
+ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
+ * data being sent to the EC.
+ *
+ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
+ * Causes the packet data to be actively corrupted by overwriting it with
+ * pre-defined values, such that it becomes invalid, causing the EC to respond
+ * with a NAK packet. Useful to test handling of NAK packets received by the
+ * driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_tx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
+ * test handling thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
+
+/**
+ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
+ * data being sent by the EC.
+ *
+ * Hook to simulate invalid data/checksum of the message frame and test handling
+ * thereof in the driver.
+ */
+static noinline bool ssh_ptl_should_corrupt_rx_data(void)
+{
+ return false;
+}
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
+
+
+static inline bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_ack_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_ack_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static inline bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_nak_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_nak_packet(packet);
+ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
+ packet);
+
+ return true;
+}
+
+static inline bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
+{
+ if (likely(!ssh_ptl_should_drop_dsq_packet()))
+ return false;
+
+ trace_ssam_ei_tx_drop_dsq_packet(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: dropping sequenced data packet %p\n",
+ packet);
+
+ return true;
+}
+
+static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ // ignore packets that don't carry any data (i.e. flush)
+ if (!packet->data.ptr || !packet->data.len)
+ return false;
+
+ switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
+ case SSH_FRAME_TYPE_ACK:
+ return __ssh_ptl_should_drop_ack_packet(packet);
+
+ case SSH_FRAME_TYPE_NAK:
+ return __ssh_ptl_should_drop_nak_packet(packet);
+
+ case SSH_FRAME_TYPE_DATA_SEQ:
+ return __ssh_ptl_should_drop_dsq_packet(packet);
+
+ default:
+ return false;
+ }
+}
+
+static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
+ const unsigned char *buf, size_t count)
+{
+ int status;
+
+ status = ssh_ptl_should_fail_write();
+ if (unlikely(status)) {
+ trace_ssam_ei_tx_fail_write(packet, status);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating transmit error %d, packet %p\n",
+ status, packet);
+
+ return status;
+ }
+
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+ // ignore packets that don't carry any data (i.e. flush)
+ if (!packet->data.ptr || !packet->data.len)
+ return;
+
+ // only allow sequenced data packets to be modified
+ if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_tx_data()))
+ return;
+
+ trace_ssam_ei_tx_corrupt_data(packet);
+ ptl_info(packet->ptl,
+ "packet error injection: simulating invalid transmit data on packet %p\n",
+ packet);
+
+ /*
+ * NB: The value 0xb3 has been chosen more or less randomly so that it
+ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
+ * non-trivial (i.e. non-zero, non-0xff).
+ */
+ memset(packet->data.ptr, 0xb3, packet->data.len);
+}
+
+static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+ struct ssam_span frame;
+
+ // check if there actually is something to corrupt
+ if (!sshp_find_syn(data, &frame))
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_syn()))
+ return;
+
+ trace_ssam_ei_rx_corrupt_syn("data_length", data->len);
+
+ data->ptr[1] = 0xb3; // set second byte of SYN to "random" value
+}
+
+static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+ size_t payload_len, message_len;
+ struct ssh_frame *sshf;
+
+ // ignore incomplete messages, will get handled once it's complete
+ if (frame->len < SSH_MESSAGE_LENGTH(0))
+ return;
+
+ // ignore incomplete messages, part 2
+ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
+ message_len = SSH_MESSAGE_LENGTH(payload_len);
+ if (frame->len < message_len)
+ return;
+
+ if (likely(!ssh_ptl_should_corrupt_rx_data()))
+ return;
+
+ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
+ trace_ssam_ei_rx_corrupt_data(sshf);
+
+ /*
+ * Flip bits in first byte of payload checksum. This is basically
+ * equivalent to a payload/frame data error without us having to worry
+ * about (the, arguably pretty small, probability of) accidental
+ * checksum collisions.
+ */
+ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
+}
+
+#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
+
+static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
+{
+ return false;
+}
+
+static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
+ struct ssh_packet *packet,
+ const unsigned char *buf,
+ size_t count)
+{
+ return serdev_device_write_buf(ptl->serdev, buf, count);
+}
+
+static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
+ struct ssam_span *data)
+{
+}
+
+static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
+ struct ssam_span *frame)
+{
+}
+
+#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */
+
+
+static void __ssh_ptl_packet_release(struct kref *kref)
+{
+ struct ssh_packet *p = to_ssh_packet(kref, refcnt);
+
+ trace_ssam_packet_release(p);
+
+ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
+ p->ops->release(p);
+}
+
+void ssh_packet_put(struct ssh_packet *packet)
+{
+ kref_put(&packet->refcnt, __ssh_ptl_packet_release);
+}
+EXPORT_SYMBOL_GPL(ssh_packet_put);
+
+static inline u8 ssh_packet_get_seq(struct ssh_packet *packet)
+{
+ return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
+}
+
+
+void ssh_packet_init(struct ssh_packet *packet,
+ const struct ssh_packet_args *args)
+{
+ kref_init(&packet->refcnt);
+
+ packet->ptl = NULL;
+ INIT_LIST_HEAD(&packet->queue_node);
+ INIT_LIST_HEAD(&packet->pending_node);
+
+ packet->state = args->type & SSH_PACKET_FLAGS_TY_MASK;
+ packet->priority = args->priority;
+ packet->timestamp = KTIME_MAX;
+
+ packet->data.ptr = NULL;
+ packet->data.len = 0;
+
+ packet->ops = args->ops;
+}
+
+
+static struct kmem_cache *ssh_ctrl_packet_cache;
+
+int ssh_ctrl_packet_cache_init(void)
+{
+ const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
+ const unsigned int align = __alignof__(struct ssh_packet);
+ struct kmem_cache *cache;
+
+ cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
+ if (!cache)
+ return -ENOMEM;
+
+ ssh_ctrl_packet_cache = cache;
+ return 0;
+}
+
+void ssh_ctrl_packet_cache_destroy(void)
+{
+ kmem_cache_destroy(ssh_ctrl_packet_cache);
+ ssh_ctrl_packet_cache = NULL;
+}
+
+static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
+ struct ssam_span *buffer, gfp_t flags)
+{
+ *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
+ if (!*packet)
+ return -ENOMEM;
+
+ buffer->ptr = (u8 *)(*packet + 1);
+ buffer->len = SSH_MSG_LEN_CTRL;
+
+ trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
+ return 0;
+}
+
+static void ssh_ctrl_packet_free(struct ssh_packet *p)
+{
+ trace_ssam_ctrl_packet_free(p);
+ kmem_cache_free(ssh_ctrl_packet_cache, p);
+}
+
+static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
+ .complete = NULL,
+ .release = ssh_ctrl_packet_free,
+};
+
+
+static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
+ ktime_t expires)
+{
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+ ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
+ ktime_t old;
+
+ // re-adjust / schedule reaper if it is above resolution delta
+ old = READ_ONCE(ptl->rtx_timeout.expires);
+ while (ktime_before(aexp, old))
+ old = cmpxchg64(&ptl->rtx_timeout.expires, old, expires);
+
+ // if we updated the reaper expiration, modify work timeout
+ if (old == expires)
+ mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+}
+
+static void ssh_ptl_timeout_start(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ ktime_t timestamp = ktime_get_coarse_boottime();
+ ktime_t timeout = ptl->rtx_timeout.timeout;
+
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
+ return;
+
+ WRITE_ONCE(packet->timestamp, timestamp);
+ /*
+ * Ensure timestamp is set before starting the reaper. Paired with
+ * implicit barrier following check on ssh_packet_get_expiration in
+ * ssh_ptl_timeout_reap.
+ */
+ smp_mb__after_atomic();
+
+ ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout);
+}
+
+
+static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
+{
+ struct list_head *head;
+ u8 priority = READ_ONCE(p->priority);
+
+ /*
+	 * We generally assume that there are fewer control (ACK/NAK) packets and
+	 * re-submitted data packets than there are normal data packets (at least
+	 * in situations in which many packets are queued; if there aren't many
+	 * packets queued the decision on how to iterate should be basically
+	 * irrelevant; the number of control/data packets is more or less
+ * limited via the maximum number of pending packets). Thus, when
+ * inserting a control or re-submitted data packet, (determined by their
+ * priority), we search from front to back. Normal data packets are,
+ * usually queued directly at the tail of the queue, so for those search
+ * from back to front.
+ */
+
+ if (priority > SSH_PACKET_PRIORITY_DATA) {
+ list_for_each(head, &p->ptl->queue.head) {
+ p = list_entry(head, struct ssh_packet, queue_node);
+
+ if (READ_ONCE(p->priority) < priority)
+ break;
+ }
+ } else {
+ list_for_each_prev(head, &p->ptl->queue.head) {
+ p = list_entry(head, struct ssh_packet, queue_node);
+
+ if (READ_ONCE(p->priority) >= priority) {
+ head = head->next;
+ break;
+ }
+ }
+ }
+
+
+ return head;
+}
+
+static int ssh_ptl_queue_push(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ struct list_head *head;
+
+ spin_lock(&ptl->queue.lock);
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) {
+ spin_unlock(&ptl->queue.lock);
+ return -ESHUTDOWN;
+ }
+
+ // avoid further transitions when cancelling/completing
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
+ spin_unlock(&ptl->queue.lock);
+ return -EINVAL;
+ }
+
+ // if this packet has already been queued, do not add it
+ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
+ spin_unlock(&ptl->queue.lock);
+ return -EALREADY;
+ }
+
+ head = __ssh_ptl_queue_find_entrypoint(packet);
+
+ list_add_tail(&ssh_packet_get(packet)->queue_node, &ptl->queue.head);
+
+ spin_unlock(&ptl->queue.lock);
+ return 0;
+}
+
+static void ssh_ptl_queue_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ bool remove;
+
+ spin_lock(&ptl->queue.lock);
+
+ remove = test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state);
+ if (remove)
+ list_del(&packet->queue_node);
+
+ spin_unlock(&ptl->queue.lock);
+
+ if (remove)
+ ssh_packet_put(packet);
+}
+
+
+static void ssh_ptl_pending_push(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ spin_lock(&ptl->pending.lock);
+
+ // if we are cancelling/completing this packet, do not add it
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ // in case it is already pending (e.g. re-submission), do not add it
+ if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
+ spin_unlock(&ptl->pending.lock);
+ return;
+ }
+
+ atomic_inc(&ptl->pending.count);
+ list_add_tail(&ssh_packet_get(packet)->pending_node, &ptl->pending.head);
+
+ spin_unlock(&ptl->pending.lock);
+}
+
+static void ssh_ptl_pending_remove(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+ bool remove;
+
+ spin_lock(&ptl->pending.lock);
+
+ remove = test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state);
+ if (remove) {
+ list_del(&packet->pending_node);
+ atomic_dec(&ptl->pending.count);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ if (remove)
+ ssh_packet_put(packet);
+}
+
+
+static void __ssh_ptl_complete(struct ssh_packet *p, int status)
+{
+ struct ssh_ptl *ptl = READ_ONCE(p->ptl);
+
+ trace_ssam_packet_complete(p, status);
+
+ ptl_dbg_cond(ptl, "ptl: completing packet %p\n", p);
+ if (status && status != -ECANCELED)
+ ptl_dbg_cond(ptl, "ptl: packet error: %d\n", status);
+
+ if (p->ops->complete)
+ p->ops->complete(p, status);
+}
+
+static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
+{
+ /*
+	 * A call to this function should in general be preceded by
+ * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->flags) to avoid re-adding the
+ * packet to the structures it's going to be removed from.
+ *
+ * The set_bit call does not need explicit memory barriers as the
+ * implicit barrier of the test_and_set_bit call below ensure that the
+	 * implicit barrier of the test_and_set_bit call below ensures that the
+ */
+
+ if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+ return;
+
+ ssh_ptl_queue_remove(p);
+ ssh_ptl_pending_remove(p);
+
+ __ssh_ptl_complete(p, status);
+}
+
+
+static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
+ return !atomic_read(&ptl->pending.count);
+
+	// we can always process non-blocking packets
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
+ return true;
+
+ // if we are already waiting for this packet, send it again
+ if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
+ return true;
+
+ // otherwise: check if we have the capacity to send
+ return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
+}
+
+static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->queue.lock);
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+ /*
+ * If we are cancelling or completing this packet, ignore it.
+ * It's going to be removed from this queue shortly.
+ */
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * Packets should be ordered non-blocking/to-be-resent first.
+ * If we cannot process this packet, assume that we can't
+ * process any following packet either and abort.
+ */
+ if (!ssh_ptl_tx_can_process(p)) {
+ packet = ERR_PTR(-EBUSY);
+ break;
+ }
+
+ /*
+ * We are allowed to change the state now. Remove it from the
+ * queue and mark it as being transmitted. Note that we cannot
+ * add it to the set of pending packets yet, as queue locks must
+ * always be acquired before packet locks (otherwise we might
+ * run into a deadlock).
+ */
+
+ list_del(&p->queue_node);
+
+ set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
+ // ensure that state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+ packet = p;
+ break;
+ }
+ spin_unlock(&ptl->queue.lock);
+
+ return packet;
+}
+
+static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+
+ p = ssh_ptl_tx_pop(ptl);
+ if (IS_ERR(p))
+ return p;
+
+ if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
+ ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
+ ssh_ptl_pending_push(p);
+ ssh_ptl_timeout_start(p);
+ } else {
+ ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
+ }
+
+ /*
+ * Update number of tries. This directly influences the priority in case
+ * the packet is re-submitted (e.g. via timeout/NAK). Note that this is
+ * the only place where we update the priority in-flight. As this runs
+ * only on the tx-thread, this read-modify-write procedure is safe.
+ */
+ WRITE_ONCE(p->priority, READ_ONCE(p->priority) + 1);
+
+ return p;
+}
+
+static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
+{
+ struct ssh_ptl *ptl = packet->ptl;
+
+ ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
+
+ // transition state to "transmitted"
+ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
+ // ensure that state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ // if the packet is unsequenced, we're done: lock and complete
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ ssh_ptl_remove_and_complete(packet, 0);
+ }
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&ptl->tx.packet_wq);
+}
+
+static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
+{
+ // transmission failure: lock the packet and try to complete it
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
+ // ensure that state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
+
+ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
+ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
+
+ ssh_ptl_remove_and_complete(packet, status);
+
+ /*
+ * Notify that a packet transmission has finished. In general we're only
+ * waiting for one packet (if any), so wake_up_all should be fine.
+ */
+ wake_up_all(&packet->ptl->tx.packet_wq);
+}
+
+static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl)
+{
+ wait_event_interruptible(ptl->tx.thread_wq,
+ READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop());
+ WRITE_ONCE(ptl->tx.thread_signal, false);
+}
+
+static int ssh_ptl_tx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (!kthread_should_stop()) {
+ unsigned char *buf;
+ bool drop = false;
+ size_t len = 0;
+ int status = 0;
+
+ // if we don't have a packet, get the next and add it to pending
+ if (IS_ERR_OR_NULL(ptl->tx.packet)) {
+ ptl->tx.packet = ssh_ptl_tx_next(ptl);
+ ptl->tx.offset = 0;
+
+ // if no packet is available, we are done
+ if (IS_ERR(ptl->tx.packet)) {
+ ssh_ptl_tx_threadfn_wait(ptl);
+ continue;
+ }
+ }
+
+ // error injection: drop packet to simulate transmission problem
+ if (ptl->tx.offset == 0)
+ drop = ssh_ptl_should_drop_packet(ptl->tx.packet);
+
+ // error injection: simulate invalid packet data
+ if (ptl->tx.offset == 0 && !drop)
+ ssh_ptl_tx_inject_invalid_data(ptl->tx.packet);
+
+ // flush-packets don't have any data
+ if (likely(ptl->tx.packet->data.ptr && !drop)) {
+ buf = ptl->tx.packet->data.ptr + ptl->tx.offset;
+ len = ptl->tx.packet->data.len - ptl->tx.offset;
+
+ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len);
+ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+
+ status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len);
+ }
+
+ if (status < 0) {
+ // complete packet with error
+ ssh_ptl_tx_compl_error(ptl->tx.packet, status);
+ ssh_packet_put(ptl->tx.packet);
+ ptl->tx.packet = NULL;
+
+ } else if (status == len) {
+ // complete packet and/or mark as transmitted
+ ssh_ptl_tx_compl_success(ptl->tx.packet);
+ ssh_packet_put(ptl->tx.packet);
+ ptl->tx.packet = NULL;
+
+ } else { // need more buffer space
+ ptl->tx.offset += status;
+ ssh_ptl_tx_threadfn_wait(ptl);
+ }
+ }
+
+ // cancel active packet before we actually stop
+ if (!IS_ERR_OR_NULL(ptl->tx.packet)) {
+ ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN);
+ ssh_packet_put(ptl->tx.packet);
+ ptl->tx.packet = NULL;
+ }
+
+ return 0;
+}
+
+void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force)
+{
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return;
+
+ if (force || atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING) {
+ WRITE_ONCE(ptl->tx.thread_signal, true);
+ /*
+ * Ensure that the signal is set before we wake the transmitter
+ * thread to prevent lost updates: If the signal is not set,
+ * when the thread checks it in ssh_ptl_tx_threadfn_wait, it
+ * may go back to sleep.
+ */
+ smp_mb__after_atomic();
+ wake_up(&ptl->tx.thread_wq);
+ }
+}
+
+int ssh_ptl_tx_start(struct ssh_ptl *ptl)
+{
+ ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "surface-sh-tx");
+ if (IS_ERR(ptl->tx.thread))
+ return PTR_ERR(ptl->tx.thread);
+
+ return 0;
+}
+
+static int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (ptl->tx.thread) {
+ status = kthread_stop(ptl->tx.thread);
+ ptl->tx.thread = NULL;
+ }
+
+ return status;
+}
+
+
+static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
+{
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
+ struct ssh_packet *p, *n;
+
+ spin_lock(&ptl->pending.lock);
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ /*
+ * We generally expect packets to be in order, so first packet
+ * to be added to pending is first to be sent, is first to be
+ * ACKed.
+ */
+ if (unlikely(ssh_packet_get_seq(p) != seq_id))
+ continue;
+
+ /*
+ * In case we receive an ACK while handling a transmission
+ * error completion. The packet will be removed shortly.
+ */
+ if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ packet = ERR_PTR(-EPERM);
+ break;
+ }
+
+ /*
+ * Mark the packet as ACKed and remove it from pending by
+ * removing its node and decrementing the pending counter.
+ */
+ set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
+ // ensure that state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_del(&p->pending_node);
+ packet = p;
+
+ break;
+ }
+ spin_unlock(&ptl->pending.lock);
+
+ return packet;
+}
+
+static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
+{
+ wait_event(packet->ptl->tx.packet_wq,
+ test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state)
+ || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
+}
+
+static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet *p;
+ int status = 0;
+
+ p = ssh_ptl_ack_pop(ptl, seq);
+ if (IS_ERR(p)) {
+ if (PTR_ERR(p) == -ENOENT) {
+ /*
+ * The packet has not been found in the set of pending
+ * packets.
+ */
+ ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
+ } else {
+ /*
+ * The packet is pending, but we are not allowed to take
+ * it because it has been locked.
+ */
+ }
+ return;
+ }
+
+ ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
+
+ /*
+ * It is possible that the packet has been transmitted, but the state
+ * has not been updated from "transmitting" to "transmitted" yet.
+ * In that case, we need to wait for this transition to occur in order
+ * to determine between success or failure.
+ */
+ if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state))
+ ssh_ptl_wait_until_transmitted(p);
+
+ /*
+ * The packet will already be locked in case of a transmission error or
+ * cancellation. Let the transmitter or cancellation issuer complete the
+ * packet.
+ */
+ if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
+ ssh_packet_put(p);
+ return;
+ }
+
+ if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) {
+ ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
+ status = -EREMOTEIO;
+ }
+
+ ssh_ptl_remove_and_complete(p, status);
+ ssh_packet_put(p);
+
+ ssh_ptl_tx_wakeup(ptl, false);
+}
+
+
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
+{
+ struct ssh_ptl *ptl_old;
+ int status;
+
+ trace_ssam_packet_submit(p);
+
+ // validate packet fields
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
+ if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
+ return -EINVAL;
+ } else if (!p->data.ptr) {
+ return -EINVAL;
+ }
+
+ /*
+ * The ptl reference only gets set on or before the first submission.
+ * After the first submission, it has to be read-only.
+ */
+ ptl_old = READ_ONCE(p->ptl);
+ if (ptl_old == NULL)
+ WRITE_ONCE(p->ptl, ptl);
+ else if (ptl_old != ptl)
+ return -EALREADY;
+
+ status = ssh_ptl_queue_push(p);
+ if (status)
+ return status;
+
+ ssh_ptl_tx_wakeup(ptl, !test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state));
+ return 0;
+}
+
+/*
+ * This function must be called with pending lock held.
+ */
+static void __ssh_ptl_resubmit(struct ssh_packet *packet)
+{
+ struct list_head *head;
+
+ trace_ssam_packet_resubmit(packet);
+
+ spin_lock(&packet->ptl->queue.lock);
+
+ // if this packet has already been queued, do not add it
+ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
+ spin_unlock(&packet->ptl->queue.lock);
+ return;
+ }
+
+ // find first node with lower priority
+ head = __ssh_ptl_queue_find_entrypoint(packet);
+
+ /*
+ * Reset the timestamp. This must be called and executed before the
+ * pending lock is released. The lock release should be a sufficient
+ * barrier for this operation, thus there is no need to manually add
+ * one here.
+ */
+ WRITE_ONCE(packet->timestamp, KTIME_MAX);
+
+ // add packet
+ list_add_tail(&ssh_packet_get(packet)->queue_node, head);
+
+ spin_unlock(&packet->ptl->queue.lock);
+}
+
+static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
+{
+ struct ssh_packet *p;
+ bool resub = false;
+ u8 try;
+
+ /*
+ * Note: We deliberately do not remove/attempt to cancel and complete
+ * packets that are out of tries in this function. The packet will be
+ * eventually canceled and completed by the timeout. Removing the packet
+ * here could lead to overly eager cancelation if the packet has not
+ * been re-transmitted yet but the tries-counter already updated (i.e
+ * ssh_ptl_tx_next removed the packet from the queue and updated the
+ * counter, but re-transmission for the last try has not actually
+ * started yet).
+ */
+
+ spin_lock(&ptl->pending.lock);
+
+ // re-queue all pending packets
+ list_for_each_entry(p, &ptl->pending.head, pending_node) {
+ // avoid further transitions if locked
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ // do not re-schedule if packet is out of tries
+ try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
+ if (try >= SSH_PTL_MAX_PACKET_TRIES)
+ continue;
+
+ resub = true;
+ __ssh_ptl_resubmit(p);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ ssh_ptl_tx_wakeup(ptl, resub);
+}
+
+void ssh_ptl_cancel(struct ssh_packet *p)
+{
+ if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
+ return;
+
+ trace_ssam_packet_cancel(p);
+
+ /*
+ * Lock packet and commit with memory barrier. If this packet has
+ * already been locked, it's going to be removed and completed by
+ * another party, which should have precedence.
+ */
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ return;
+
+ /*
+ * By marking the packet as locked and employing the implicit memory
+ * barrier of test_and_set_bit, we have guaranteed that, at this point,
+ * the packet cannot be added to the queue any more.
+ *
+ * In case the packet has never been submitted, packet->ptl is NULL. If
+ * the packet is currently being submitted, packet->ptl may be NULL or
+ * non-NULL. Due marking the packet as locked above and committing with
+ * the memory barrier, we have guaranteed that, if packet->ptl is NULL,
+ * the packet will never be added to the queue. If packet->ptl is
+ * non-NULL, we don't have any guarantees.
+ */
+
+ if (READ_ONCE(p->ptl)) {
+ ssh_ptl_remove_and_complete(p, -ECANCELED);
+ ssh_ptl_tx_wakeup(p->ptl, false);
+ } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ __ssh_ptl_complete(p, -ECANCELED);
+ }
+}
+
+
+static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
+{
+ ktime_t timestamp = READ_ONCE(p->timestamp);
+
+ if (timestamp != KTIME_MAX)
+ return ktime_add(timestamp, timeout);
+ else
+ return KTIME_MAX;
+}
+
+static void ssh_ptl_timeout_reap(struct work_struct *work)
+{
+ struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
+ struct ssh_packet *p, *n;
+ LIST_HEAD(claimed);
+ ktime_t now = ktime_get_coarse_boottime();
+ ktime_t timeout = ptl->rtx_timeout.timeout;
+ ktime_t next = KTIME_MAX;
+ bool resub = false;
+
+ trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count));
+
+ /*
+ * Mark reaper as "not pending". This is done before checking any
+ * packets to avoid lost-update type problems.
+ */
+ WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX);
+ /*
+ * Ensure that the reaper is marked as deactivated before we continue
+ * checking packets to prevent lost-update problems when a packet is
+ * added to the pending set and ssh_ptl_timeout_reaper_mod is called
+ * during execution of the part below.
+ */
+ smp_mb__after_atomic();
+
+ spin_lock(&ptl->pending.lock);
+
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+ ktime_t expires = ssh_packet_get_expiration(p, timeout);
+ u8 try;
+
+ /*
+ * Check if the timeout hasn't expired yet. Find out next
+ * expiration date to be handled after this run.
+ */
+ if (ktime_after(expires, now)) {
+ next = ktime_before(expires, next) ? expires : next;
+ continue;
+ }
+
+ // avoid further transitions if locked
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ trace_ssam_packet_timeout(p);
+
+ // check if we still have some tries left
+ try = ssh_packet_priority_get_try(READ_ONCE(p->priority));
+ if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) {
+ resub = true;
+ __ssh_ptl_resubmit(p);
+ continue;
+ }
+
+ // no more tries left: cancel the packet
+
+ // if someone else has locked the packet already, don't use it
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
+ continue;
+
+ /*
+ * We have now marked the packet as locked. Thus it cannot be
+ * added to the pending list again after we've removed it here.
+ * We can therefore re-use the pending_node of this packet
+ * temporarily.
+ */
+
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+ atomic_dec(&ptl->pending.count);
+ list_del(&p->pending_node);
+
+ list_add_tail(&p->pending_node, &claimed);
+ }
+
+ spin_unlock(&ptl->pending.lock);
+
+ // cancel and complete the packet
+ list_for_each_entry_safe(p, n, &claimed, pending_node) {
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
+ ssh_ptl_queue_remove(p);
+ __ssh_ptl_complete(p, -ETIMEDOUT);
+ }
+
+ // drop the reference we've obtained by removing it from pending
+ list_del(&p->pending_node);
+ ssh_packet_put(p);
+ }
+
+ // ensure that reaper doesn't run again immediately
+ next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
+ if (next != KTIME_MAX)
+ ssh_ptl_timeout_reaper_mod(ptl, now, next);
+
+ // force-wakeup to properly handle re-transmits if we've re-submitted
+ ssh_ptl_tx_wakeup(ptl, resub);
+}
+
+
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
+{
+ int i;
+
+ // check if SEQ has been seen recently (i.e. packet was re-transmitted)
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
+ if (likely(ptl->rx.blocked.seqs[i] != seq))
+ continue;
+
+ ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
+ return true;
+ }
+
+	// update list of blocked sequence IDs
+ ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
+ ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
+ % ARRAY_SIZE(ptl->rx.blocked.seqs);
+
+ return false;
+}
+
+static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
+ const struct ssh_frame *frame,
+ const struct ssam_span *payload)
+{
+ if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
+ return;
+
+ ptl->ops.data_received(ptl, payload);
+}
+
+static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
+{
+ struct ssh_packet_args args;
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
+ return;
+ }
+
+ args.type = 0;
+ args.priority = SSH_PACKET_PRIORITY(ACK, 0);
+ args.ops = &ssh_ptl_ctrl_packet_ops;
+ ssh_packet_init(packet, &args);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_ack(&msgb, seq);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
+{
+ struct ssh_packet_args args;
+ struct ssh_packet *packet;
+ struct ssam_span buf;
+ struct msgbuf msgb;
+ int status;
+
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
+ if (status) {
+ ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
+ return;
+ }
+
+ args.type = 0;
+ args.priority = SSH_PACKET_PRIORITY(NAK, 0);
+ args.ops = &ssh_ptl_ctrl_packet_ops;
+ ssh_packet_init(packet, &args);
+
+ msgb_init(&msgb, buf.ptr, buf.len);
+ msgb_push_nak(&msgb);
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
+
+ ssh_ptl_submit(ptl, packet);
+ ssh_packet_put(packet);
+}
+
+static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
+{
+	struct ssh_frame *frame;
+	struct ssam_span payload;
+	struct ssam_span aligned;
+	bool syn_found;
+	int status;
+
+	// error injection: modify data to simulate corrupt SYN bytes
+	ssh_ptl_rx_inject_invalid_syn(ptl, source);
+
+	// find SYN
+	syn_found = sshp_find_syn(source, &aligned);
+
+	if (unlikely(aligned.ptr != source->ptr)) {
+		ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
+
+		/*
+		 * Notes:
+		 * - This might send multiple NAKs in case the communication
+		 *   starts with an invalid SYN and is broken down into multiple
+		 *   pieces. This should generally be handled fine, we just
+		 *   might receive duplicate data in this case, which is
+		 *   detected when handling data frames.
+		 * - This path will also be executed on invalid CRCs: When an
+		 *   invalid CRC is encountered, the code below will skip data
+		 *   until directly after the SYN. This causes the search for
+		 *   the next SYN, which is generally not placed directly after
+		 *   the last one.
+		 */
+		ssh_ptl_send_nak(ptl);
+	}
+
+	if (unlikely(!syn_found))
+		return aligned.ptr - source->ptr;
+
+	// error injection: modify data to simulate corruption
+	ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
+
+	// parse and validate frame
+	status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
+				  SSH_PTL_RX_BUF_LEN);
+	if (status) // invalid frame: skip to next syn
+		return aligned.ptr - source->ptr + sizeof(u16);
+	if (!frame) // not enough data
+		return aligned.ptr - source->ptr;
+
+	trace_ssam_rx_frame_received(frame);
+
+	switch (frame->type) {
+	case SSH_FRAME_TYPE_ACK:
+		ssh_ptl_acknowledge(ptl, frame->seq);
+		break;
+
+	case SSH_FRAME_TYPE_NAK:
+		ssh_ptl_resubmit_pending(ptl);
+		break;
+
+	case SSH_FRAME_TYPE_DATA_SEQ:
+		ssh_ptl_send_ack(ptl, frame->seq);
+		fallthrough;
+
+	case SSH_FRAME_TYPE_DATA_NSQ:
+		ssh_ptl_rx_dataframe(ptl, frame, &payload);
+		break;
+
+	default:
+		ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n",
+			 frame->type);
+		break;
+	}
+
+	return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len);
+}
+
+static int ssh_ptl_rx_threadfn(void *data)
+{
+ struct ssh_ptl *ptl = data;
+
+ while (true) {
+ struct ssam_span span;
+ size_t offs = 0;
+ size_t n;
+
+ wait_event_interruptible(ptl->rx.wq,
+ !kfifo_is_empty(&ptl->rx.fifo)
+ || kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ // copy from fifo to evaluation buffer
+ n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
+
+ ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
+ print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ ptl->rx.buf.ptr + ptl->rx.buf.len - n,
+ n, false);
+
+ // parse until we need more bytes or buffer is empty
+ while (offs < ptl->rx.buf.len) {
+ sshp_buf_span_from(&ptl->rx.buf, offs, &span);
+ n = ssh_ptl_rx_eval(ptl, &span);
+ if (n == 0)
+ break; // need more bytes
+
+ offs += n;
+ }
+
+ // throw away the evaluated parts
+ sshp_buf_drop(&ptl->rx.buf, offs);
+ }
+
+ return 0;
+}
+
+static inline void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
+{
+ wake_up(&ptl->rx.wq);
+}
+
+int ssh_ptl_rx_start(struct ssh_ptl *ptl)
+{
+ if (ptl->rx.thread)
+ return 0;
+
+ ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl, "surface-sh-rx");
+ if (IS_ERR(ptl->rx.thread))
+ return PTR_ERR(ptl->rx.thread);
+
+ return 0;
+}
+
+static int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
+{
+ int status = 0;
+
+ if (ptl->rx.thread) {
+ status = kthread_stop(ptl->rx.thread);
+ ptl->rx.thread = NULL;
+ }
+
+ return status;
+}
+
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
+{
+ int used;
+
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
+ return -ESHUTDOWN;
+
+ used = kfifo_in(&ptl->rx.fifo, buf, n);
+ if (used)
+ ssh_ptl_rx_wakeup(ptl);
+
+ return used;
+}
+
+
+/**
+ * ssh_ptl_shutdown() - Shut down the packet transmission layer.
+ * @ptl: The packet transmission layer.
+ *
+ * Shuts down the packet transmission layer, removing and canceling all queued
+ * and pending packets. Packets canceled by this operation will be completed
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
+ * stopped.
+ *
+ * As a result of this function, the transmission layer will be marked as shut
+ * down. Submission of packets after the transmission layer has been shut down
+ * will fail with %-ESHUTDOWN.
+ */
+void ssh_ptl_shutdown(struct ssh_ptl *ptl)
+{
+	LIST_HEAD(complete_q);
+	LIST_HEAD(complete_p);
+	struct ssh_packet *p, *n;
+	int status;
+
+	// ensure that no new packets (including ACK/NAK) can be submitted
+	set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
+	/*
+	 * Ensure that the layer gets marked as shut-down before actually
+	 * stopping it. In combination with the check in ssh_ptl_queue_push,
+	 * this guarantees that no new packets can be added and all already
+	 * queued packets are properly cancelled. In combination with the check
+	 * in ssh_ptl_rx_rcvbuf, this guarantees that received data is properly
+	 * cut off.
+	 */
+	smp_mb__after_atomic();
+
+	status = ssh_ptl_rx_stop(ptl);
+	if (status)
+		ptl_err(ptl, "ptl: failed to stop receiver thread\n");
+
+	status = ssh_ptl_tx_stop(ptl);
+	if (status)
+		ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
+
+	cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
+
+	/*
+	 * At this point, all threads have been stopped. This means that the
+	 * only references to packets from inside the system are in the queue
+	 * and pending set.
+	 *
+	 * Note: We still need locks here because someone could still be
+	 * cancelling packets.
+	 *
+	 * Note 2: We can re-use queue_node (or pending_node) if we mark the
+	 * packet as locked and then remove it from the queue (or pending set
+	 * respectively). Marking the packet as locked avoids re-queueing
+	 * (which should already be prevented by having stopped the threads...)
+	 * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
+	 * new list via other threads (e.g. cancellation).
+	 *
+	 * Note 3: There may be overlap between complete_p and complete_q.
+	 * This is handled via test_and_set_bit on the "completed" flag
+	 * (also handles cancellation).
+	 */
+
+	// mark queued packets as locked and move them to complete_q
+	spin_lock(&ptl->queue.lock);
+	list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
+		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+		// ensure that state does not get zero
+		smp_mb__before_atomic();
+		clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
+
+		list_del(&p->queue_node);
+		list_add_tail(&p->queue_node, &complete_q);
+	}
+	spin_unlock(&ptl->queue.lock);
+
+	// mark pending packets as locked and move them to complete_p
+	spin_lock(&ptl->pending.lock);
+	list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
+		set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
+		// ensure that state does not get zero
+		smp_mb__before_atomic();
+		clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
+
+		list_del(&p->pending_node);
+		list_add_tail(&p->pending_node, &complete_p);
+	}
+	atomic_set(&ptl->pending.count, 0);
+	spin_unlock(&ptl->pending.lock);
+
+	// complete and drop packets on complete_q
+	list_for_each_entry(p, &complete_q, queue_node) {
+		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+			__ssh_ptl_complete(p, -ESHUTDOWN);
+
+		ssh_packet_put(p);
+	}
+
+	// complete and drop packets on complete_p
+	list_for_each_entry(p, &complete_p, pending_node) {
+		if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
+			__ssh_ptl_complete(p, -ESHUTDOWN);
+
+		ssh_packet_put(p);
+	}
+
+	/*
+	 * At this point we have guaranteed that the system doesn't reference
+	 * any packets any more.
+	 */
+}
+
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops)
+{
+ int i, status;
+
+ ptl->serdev = serdev;
+ ptl->state = 0;
+
+ spin_lock_init(&ptl->queue.lock);
+ INIT_LIST_HEAD(&ptl->queue.head);
+
+ spin_lock_init(&ptl->pending.lock);
+ INIT_LIST_HEAD(&ptl->pending.head);
+ atomic_set_release(&ptl->pending.count, 0);
+
+ ptl->tx.thread = NULL;
+ ptl->tx.thread_signal = false;
+ ptl->tx.packet = NULL;
+ ptl->tx.offset = 0;
+ init_waitqueue_head(&ptl->tx.thread_wq);
+ init_waitqueue_head(&ptl->tx.packet_wq);
+
+ ptl->rx.thread = NULL;
+ init_waitqueue_head(&ptl->rx.wq);
+
+ ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
+ ptl->rtx_timeout.expires = KTIME_MAX;
+ INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
+
+ ptl->ops = *ops;
+
+ // initialize list of recent/blocked SEQs with invalid sequence IDs
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
+ ptl->rx.blocked.seqs[i] = 0xFFFF;
+ ptl->rx.blocked.offset = 0;
+
+ status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
+ if (status)
+ return status;
+
+ status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
+ if (status)
+ kfifo_free(&ptl->rx.fifo);
+
+ return status;
+}
+
+void ssh_ptl_destroy(struct ssh_ptl *ptl)
+{
+ kfifo_free(&ptl->rx.fifo);
+ sshp_buf_free(&ptl->rx.buf);
+}
diff --git a/drivers/misc/surface_sam/ssh_packet_layer.h b/drivers/misc/surface_sam/ssh_packet_layer.h
new file mode 100644
index 0000000000000..74e5adba888ea
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_packet_layer.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_SSH_PACKET_LAYER_H
+#define _SSAM_SSH_PACKET_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "ssh_msgb.h"
+#include "ssh_parser.h"
+
+
+enum ssh_ptl_state_flags {
+ SSH_PTL_SF_SHUTDOWN_BIT,
+};
+
+struct ssh_ptl_ops {
+ void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
+};
+
+struct ssh_ptl {
+ struct serdev_device *serdev;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ bool thread_signal;
+ struct task_struct *thread;
+ struct wait_queue_head thread_wq;
+ struct wait_queue_head packet_wq;
+ struct ssh_packet *packet;
+ size_t offset;
+ } tx;
+
+ struct {
+ struct task_struct *thread;
+ struct wait_queue_head wq;
+ struct kfifo fifo;
+ struct sshp_buf buf;
+
+ struct {
+ u16 seqs[8];
+ u16 offset;
+ } blocked;
+ } rx;
+
+ struct {
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_ptl_ops ops;
+};
+
+struct ssh_packet_args {
+ unsigned long type;
+ u8 priority;
+ const struct ssh_packet_ops *ops;
+};
+
+
+#define __ssam_prcond(func, p, fmt, ...) \
+ do { \
+ if ((p)) \
+ func((p), fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
+#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
+
+#define to_ssh_ptl(ptr, member) \
+ container_of(ptr, struct ssh_ptl, member)
+
+
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
+ struct ssh_ptl_ops *ops);
+
+void ssh_ptl_destroy(struct ssh_ptl *ptl);
+
+static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
+{
+ return ptl->serdev ? &ptl->serdev->dev : NULL;
+}
+
+int ssh_ptl_tx_start(struct ssh_ptl *ptl);
+int ssh_ptl_rx_start(struct ssh_ptl *ptl);
+void ssh_ptl_shutdown(struct ssh_ptl *ptl);
+
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
+void ssh_ptl_cancel(struct ssh_packet *p);
+
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
+void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force);
+
+void ssh_packet_init(struct ssh_packet *packet,
+ const struct ssh_packet_args *args);
+
+int ssh_ctrl_packet_cache_init(void);
+void ssh_ctrl_packet_cache_destroy(void);
+
+#endif /* _SSAM_SSH_PACKET_LAYER_H */
diff --git a/drivers/misc/surface_sam/ssh_parser.c b/drivers/misc/surface_sam/ssh_parser.c
new file mode 100644
index 0000000000000..7ba879ca3e243
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_parser.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compiler.h>
+
+#include "ssh_parser.h"
+#include "ssh_protocol.h"
+
+
+/**
+ * sshp_validate_crc() - Validate a CRC in raw message data.
+ * @src: The span of data over which the CRC should be computed.
+ * @crc: The pointer to the expected u16 CRC value.
+ *
+ * Computes the CRC of the provided data span (@src), compares it to the CRC
+ * stored at the given address (@crc), and returns the result of this
+ * comparison, i.e. true iff equal. This function is intended to run on raw
+ * input/message data.
+ */
+static inline bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
+{
+ u16 actual = ssh_crc(src->ptr, src->len);
+ u16 expected = get_unaligned_le16(crc);
+
+ return actual == expected;
+}
+
+/**
+ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
+ * @src: The data span to check the start of.
+ */
+static inline bool sshp_starts_with_syn(const struct ssam_span *src)
+{
+ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
+}
+
+/**
+ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
+ * @src: The data span to search in.
+ * @rem: The span (output) indicating the remaining data, starting with SSH
+ *       SYN bytes, if found.
+ *
+ * Search for SSH SYN bytes in the given source span. If found, set the @rem
+ * span to the remaining data, starting with the first SYN bytes and capped by
+ * the source span length, and return ``true``. This function does not copy
+ * any data, but rather only sets pointers to the respective start addresses
+ * and length values.
+ *
+ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
+ * span at the end of the source span and return false.
+ *
+ * If partial SSH SYN bytes could be found at the end of the source span, set
+ * the @rem span to cover these partial SYN bytes, capped by the end of the
+ * source span, and return false. This function should then be re-run once
+ * more data is available.
+ */
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
+{
+	size_t i;
+
+	for (i = 0; i + 1 < src->len; i++) {
+		if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
+			rem->ptr = src->ptr + i;
+			rem->len = src->len - i;
+			return true;
+		}
+	}
+
+	if (unlikely(src->len && src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
+		rem->ptr = src->ptr + src->len - 1;
+		rem->len = 1;
+		return false;
+	}
+
+	rem->ptr = src->ptr + src->len;
+	rem->len = 0;
+	return false;
+}
+
+/**
+ * sshp_parse_frame() - Parse SSH frame.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @frame: The parsed frame (output).
+ * @payload: The parsed payload (output).
+ * @maxlen: The maximum supported message length.
+ *
+ * Parses and validates a SSH frame, including its payload, from the given
+ * source. Sets the provided @frame pointer to the start of the frame and
+ * writes the limits of the frame payload to the provided @payload span
+ * pointer.
+ *
+ * This function does not copy any data, but rather only validates the message
+ * data and sets pointers (and length values) to indicate the respective parts.
+ *
+ * If no complete SSH frame could be found, the frame pointer will be set to
+ * the %NULL pointer and the payload span will be set to the null span (start
+ * pointer %NULL, size zero).
+ *
+ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
+ * the start of the message is invalid, %-EBADMSG if any (frame-header or
+ * payload) CRC is ivnalid, or %-EMSGSIZE if the SSH message is bigger than
+ * the maximum message length specified in the @maxlen parameter.
+ */
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen)
+{
+ struct ssam_span sf;
+ struct ssam_span sp;
+
+ // initialize output
+ *frame = NULL;
+ payload->ptr = NULL;
+ payload->len = 0;
+
+ if (!sshp_starts_with_syn(source)) {
+ dev_warn(dev, "rx: parser: invalid start of frame\n");
+ return -ENOMSG;
+ }
+
+	// check for minimum packet length
+ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
+ dev_dbg(dev, "rx: parser: not enough data for frame\n");
+ return 0;
+ }
+
+ // pin down frame
+ sf.ptr = source->ptr + sizeof(u16);
+ sf.len = sizeof(struct ssh_frame);
+
+ // validate frame CRC
+ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
+ dev_warn(dev, "rx: parser: invalid frame CRC\n");
+ return -EBADMSG;
+ }
+
+ // ensure packet does not exceed maximum length
+ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
+ if (unlikely(sp.len + SSH_MESSAGE_LENGTH(0) > maxlen)) {
+ dev_warn(dev, "rx: parser: frame too large: %u bytes\n",
+ ((struct ssh_frame *)sf.ptr)->len);
+ return -EMSGSIZE;
+ }
+
+ // pin down payload
+ sp.ptr = sf.ptr + sf.len + sizeof(u16);
+
+ // check for frame + payload length
+ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
+ dev_dbg(dev, "rx: parser: not enough data for payload\n");
+ return 0;
+ }
+
+ // validate payload crc
+ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
+ dev_warn(dev, "rx: parser: invalid payload CRC\n");
+ return -EBADMSG;
+ }
+
+ *frame = (struct ssh_frame *)sf.ptr;
+ *payload = sp;
+
+ dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n",
+ (*frame)->type, (*frame)->len);
+
+ return 0;
+}
+
+/**
+ * sshp_parse_command() - Parse SSH command frame payload.
+ * @dev: The device used for logging.
+ * @source: The source to parse from.
+ * @command: The parsed command (output).
+ * @command_data: The parsed command data/payload (output).
+ *
+ * Parses and validates a SSH command frame payload. Sets the @command pointer
+ * to the command header and the @command_data span to the command data (i.e.
+ * payload of the command). This will result in a zero-length span if the
+ * command does not have any associated data/payload. This function does not
+ * check the frame-payload-type field, which should be checked by the caller
+ * before calling this function.
+ *
+ * The @source parameter should be the complete frame payload, e.g. returned
+ * by the sshp_parse_frame() command.
+ *
+ * This function does not copy any data, but rather only validates the frame
+ * payload data and sets pointers (and length values) to indicate the
+ * respective parts.
+ *
+ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
+ * valid command-type frame payload, i.e. is too short.
+ */
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data)
+{
+ // check for minimum length
+ if (unlikely(source->len < sizeof(struct ssh_command))) {
+ *command = NULL;
+ command_data->ptr = NULL;
+ command_data->len = 0;
+
+ dev_err(dev, "rx: parser: command payload is too short\n");
+ return -ENOMSG;
+ }
+
+ *command = (struct ssh_command *)source->ptr;
+ command_data->ptr = source->ptr + sizeof(struct ssh_command);
+ command_data->len = source->len - sizeof(struct ssh_command);
+
+ dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x, cid: 0x%02x)\n",
+ (*command)->tc, (*command)->cid);
+
+ return 0;
+}
diff --git a/drivers/misc/surface_sam/ssh_parser.h b/drivers/misc/surface_sam/ssh_parser.h
new file mode 100644
index 0000000000000..4216ac4b45bcf
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_parser.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SURFACE_SAM_SSH_PARSER_H
+#define _SURFACE_SAM_SSH_PARSER_H
+
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+
+#include <linux/surface_aggregator_module.h>
+
+
+/**
+ * struct sshp_buf - Parser buffer for SSH messages.
+ * @ptr: Pointer to the beginning of the buffer.
+ * @len: Number of bytes used in the buffer.
+ * @cap: Maximum capacity of the buffer.
+ */
+struct sshp_buf {
+	u8 *ptr;
+	size_t len;
+	size_t cap;
+};
+
+/**
+ * sshp_buf_init() - Initialize a SSH parser buffer.
+ * @buf: The buffer to initialize.
+ * @ptr: The memory backing the buffer.
+ * @cap: The length of the memory backing the buffer, i.e. its capacity.
+ *
+ * Initializes the buffer with the given memory as backing and sets its used
+ * length to zero.
+ */
+static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
+{
+	buf->ptr = ptr;
+	buf->len = 0;
+	buf->cap = cap;
+}
+
+/**
+ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
+ * @buf: The buffer to initialize/allocate to.
+ * @cap: The desired capacity of the buffer.
+ * @flags: The flags used for allocating the memory.
+ *
+ * Allocates @cap bytes and initializes the provided buffer struct with the
+ * allocated memory.
+ *
+ * Return: Returns zero on success or %-ENOMEM if allocation failed.
+ */
+static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
+{
+	u8 *ptr;
+
+	ptr = kzalloc(cap, flags);
+	if (!ptr)
+		return -ENOMEM;
+
+	sshp_buf_init(buf, ptr, cap);
+	return 0;
+
+}
+
+/**
+ * sshp_buf_free() - Free a SSH parser buffer.
+ * @buf: The buffer to free.
+ *
+ * Frees a SSH parser buffer by freeing the memory backing it and then
+ * resetting its pointer to %NULL and length and capacity to zero. Intended to
+ * free a buffer previously allocated with sshp_buf_alloc().
+ */
+static inline void sshp_buf_free(struct sshp_buf *buf)
+{
+	kfree(buf->ptr);
+	buf->ptr = NULL;
+	buf->len = 0;
+	buf->cap = 0;
+}
+
+/**
+ * sshp_buf_drop() - Drop data from the beginning of the buffer.
+ * @buf: The buffer to drop data from.
+ * @n: The number of bytes to drop.
+ *
+ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
+ * the beginning of the buffer. The caller must ensure that @n <= @buf->len.
+ */
+static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
+{
+	memmove(buf->ptr, buf->ptr + n, buf->len - n);
+	buf->len -= n;
+}
+
+/**
+ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
+ * @buf: The buffer to write the data into.
+ * @fifo: The fifo to read the data from.
+ *
+ * Transfers the data contained in the fifo to the buffer, removing it from
+ * the fifo. This function will try to transfer as much data as possible,
+ * limited either by the remaining space in the buffer or by the number of
+ * bytes available in the fifo.
+ *
+ * Return: Returns the number of bytes transferred.
+ */
+static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
+					     struct kfifo *fifo)
+{
+	size_t n;
+
+	n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
+	buf->len += n;
+
+	return n;
+}
+
+/**
+ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
+ * @buf: The buffer to create the span from.
+ * @offset: The offset in the buffer at which the span should start.
+ * @span: The span to initialize (output).
+ *
+ * Initializes the provided span to point to the memory at the given offset in
+ * the buffer, with the length of the span being capped by the number of bytes
+ * used in the buffer after the offset (i.e. bytes remaining after the
+ * offset). The span points into @buf's memory and does not copy any data.
+ *
+ * Warning: This function does not validate that @offset is less than or equal
+ * to the number of bytes used in the buffer or the buffer capacity. This must
+ * be guaranteed by the caller.
+ */
+static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
+				      struct ssam_span *span)
+{
+	span->ptr = buf->ptr + offset;
+	span->len = buf->len - offset;
+}
+
+
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
+
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
+ struct ssh_frame **frame, struct ssam_span *payload,
+ size_t maxlen);
+
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
+ struct ssh_command **command,
+ struct ssam_span *command_data);
+
+#endif /* _SURFACE_SAM_SSH_PARSER_H */
diff --git a/drivers/misc/surface_sam/ssh_protocol.h b/drivers/misc/surface_sam/ssh_protocol.h
new file mode 100644
index 0000000000000..2192398b2e12d
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_protocol.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_SSH_PROTOCOL_H
+#define _SSAM_SSH_PROTOCOL_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/crc-ccitt.h>
+
+
+/*
+ * SSH_NUM_EVENTS - The number of reserved event IDs.
+ *
+ * The number of reserved event IDs, used for registering an SSH event
+ * handler. Valid event IDs are numbers below or equal to this value, with
+ * exception of zero, which is not an event ID. Thus, this is also the
+ * absolute maximum number of event handlers that can be registered.
+ */
+#define SSH_NUM_EVENTS		34
+
+/*
+ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
+ */
+#define SSH_NUM_TARGETS		2
+
+/*
+ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes, as a u16 value.
+ */
+#define SSH_MSG_SYN		((u16)0x55aa)
+
+
+/**
+ * ssh_crc() - Compute CRC for SSH messages.
+ * @buf: The pointer pointing to the data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ *
+ * Compute and return the CRC (CRC-CCITT/FALSE, 0xffff seed) used for SSH messages.
+ */
+static inline u16 ssh_crc(const u8 *buf, size_t len)
+{
+	return crc_ccitt_false(0xffff, buf, len);
+}
+
+/**
+ * ssh_rqid_next_valid() - Return the next valid request ID.
+ * @rqid: The current request ID.
+ *
+ * Compute and return the next valid request ID, following the current request
+ * ID provided to this function. This function skips any request IDs reserved
+ * for events, i.e. IDs 1 to %SSH_NUM_EVENTS (inclusive).
+ */
+static inline u16 ssh_rqid_next_valid(u16 rqid)
+{
+	return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
+}
+
+/**
+ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
+ * @rqid: The request ID to convert.
+ */
+static inline u16 ssh_rqid_to_event(u16 rqid)
+{
+	return rqid - 1u;
+}
+
+/**
+ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
+ * @rqid: The request ID to check.
+ */
+static inline bool ssh_rqid_is_event(u16 rqid)
+{
+	return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
+}
+
+/**
+ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
+ * @tc: The target category to convert.
+ */
+static inline int ssh_tc_to_rqid(u8 tc)
+{
+	return tc; // identity mapping: the target category value is the request ID
+}
+
+/**
+ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
+ * @tid: The target ID to convert.
+ */
+static inline u8 ssh_tid_to_index(u8 tid)
+{
+	return tid - 1u;
+}
+
+/**
+ * ssh_tid_is_valid() - Check if target ID is valid/supported.
+ * @tid: The target ID to check.
+ */
+static inline bool ssh_tid_is_valid(u8 tid)
+{
+	return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
+}
+
+#endif /* _SSAM_SSH_PROTOCOL_H */
diff --git a/drivers/misc/surface_sam/ssh_request_layer.c b/drivers/misc/surface_sam/ssh_request_layer.c
new file mode 100644
index 0000000000000..b61c45b572978
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_request_layer.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/error-injection.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "ssh_packet_layer.h"
+#include "ssh_request_layer.h"
+
+#include "ssam_trace.h"
+
+
+#define SSH_RTL_REQUEST_TIMEOUT			ms_to_ktime(3000)
+#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION	ms_to_ktime(max(2000 / HZ, 50))
+
+#define SSH_RTL_MAX_PENDING		3 // max. number of concurrently pending requests
+
+
+#ifdef CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION
+
+/**
+ * ssh_rtl_should_drop_response() - Error injection hook to drop request
+ * responses.
+ *
+ * Useful to cause request transmission timeouts in the driver by dropping the
+ * response to a request. Returns %false unless overridden by error injection.
+ */
+static noinline bool ssh_rtl_should_drop_response(void)
+{
+	return false;
+}
+ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
+
+#else
+
+static inline bool ssh_rtl_should_drop_response(void)
+{
+	return false;
+}
+
+#endif
+
+
+static inline u16 ssh_request_get_rqid(struct ssh_request *rqst)
+{
+	return get_unaligned_le16(rqst->packet.data.ptr
+				  + SSH_MSGOFFSET_COMMAND(rqid));
+}
+
+static inline u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
+{
+	if (!rqst->packet.data.ptr)
+		return -1; // no message buffer set up yet, rqid not available
+
+	return ssh_request_get_rqid(rqst);
+}
+
+
+static void ssh_rtl_queue_remove(struct ssh_request *rqst)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+	bool remove;
+
+	spin_lock(&rtl->queue.lock);
+
+	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
+	if (remove)
+		list_del(&rqst->node);
+
+	spin_unlock(&rtl->queue.lock);
+
+	if (remove)
+		ssh_request_put(rqst); // drop reference held by the queue
+}
+
+static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
+{
+	bool empty;
+
+	spin_lock(&rtl->queue.lock);
+	empty = list_empty(&rtl->queue.head);
+	spin_unlock(&rtl->queue.lock);
+
+	return empty;
+}
+
+
+static void ssh_rtl_pending_remove(struct ssh_request *rqst)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+	bool remove;
+
+	spin_lock(&rtl->pending.lock);
+
+	remove = test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state);
+	if (remove) {
+		atomic_dec(&rtl->pending.count);
+		list_del(&rqst->node);
+	}
+
+	spin_unlock(&rtl->pending.lock);
+
+	if (remove)
+		ssh_request_put(rqst); // drop reference held by the pending set
+}
+
+static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+	spin_lock(&rtl->pending.lock);
+
+	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+		spin_unlock(&rtl->pending.lock);
+		return -EINVAL; // request locked, must not be transmitted
+	}
+
+	if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
+		spin_unlock(&rtl->pending.lock);
+		return -EALREADY; // already marked as pending
+	}
+
+	atomic_inc(&rtl->pending.count);
+	list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
+
+	spin_unlock(&rtl->pending.lock);
+	return 0;
+}
+
+
+static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+	trace_ssam_request_complete(rqst, status);
+
+	// rtl/ptl may not be set if we're canceling before submitting
+	rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x, status: %d)\n",
+		     ssh_request_get_rqid_safe(rqst), status);
+
+	if (status && status != -ECANCELED)
+		rtl_dbg_cond(rtl, "rtl: request error: %d\n", status);
+
+	rqst->ops->complete(rqst, NULL, NULL, status); // complete without response payload
+}
+
+static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
+				      const struct ssh_command *cmd,
+				      const struct ssam_span *data)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+	trace_ssam_request_complete(rqst, 0);
+
+	rtl_dbg(rtl, "rtl: completing request with response (rqid: 0x%04x)\n",
+		ssh_request_get_rqid(rqst));
+
+	rqst->ops->complete(rqst, cmd, data, 0);
+}
+
+
+static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+
+	if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
+		return !atomic_read(&rtl->pending.count); // flush: only when nothing is pending
+
+	return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
+}
+
+static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
+{
+	struct ssh_request *rqst = ERR_PTR(-ENOENT);
+	struct ssh_request *p, *n;
+
+	spin_lock(&rtl->queue.lock);
+
+	// find first non-locked request and remove it
+	list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
+		if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
+			continue;
+
+		if (!ssh_rtl_tx_can_process(p)) {
+			rqst = ERR_PTR(-EBUSY);
+			break;
+		}
+
+		// remove from queue and mark as transmitting
+		set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
+		// ensure state never gets zero
+		smp_mb__before_atomic();
+		clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
+
+		list_del(&p->node);
+
+		rqst = p;
+		break;
+	}
+
+	spin_unlock(&rtl->queue.lock);
+	return rqst;
+}
+
+static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
+{
+	struct ssh_request *rqst;
+	int status;
+
+	// get and prepare next request for transmit
+	rqst = ssh_rtl_tx_next(rtl);
+	if (IS_ERR(rqst))
+		return PTR_ERR(rqst);
+
+	// add to/mark as pending
+	status = ssh_rtl_tx_pending_push(rqst);
+	if (status) {
+		ssh_request_put(rqst);
+		return -EAGAIN;
+	}
+
+	// submit packet
+	status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
+	if (status == -ESHUTDOWN) {
+		/*
+		 * Packet has been refused due to the packet layer shutting
+		 * down. Complete it here.
+		 */
+		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
+		/*
+		 * Note: A barrier is not required here, as there are only two
+		 * references in the system at this point: The one that we have,
+		 * and the other one that belongs to the pending set. Due to the
+		 * request being marked as "transmitting", our process is the
+		 * only one allowed to remove the pending node and change the
+		 * state. Normally, the task would fall to the packet callback,
+		 * but as this is a path where submission failed, this callback
+		 * will never be executed.
+		 */
+
+		ssh_rtl_pending_remove(rqst);
+		ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
+
+		ssh_request_put(rqst);
+		return -ESHUTDOWN;
+
+	} else if (status) {
+		/*
+		 * If submitting the packet failed and the packet layer isn't
+		 * shutting down, the packet has either been submitted/queued
+		 * before (-EALREADY, which cannot happen as we have guaranteed
+		 * that requests cannot be re-submitted), or the packet was
+		 * marked as locked (-EINVAL). To mark the packet locked at this
+		 * stage, the request, and thus the packet itself, had to have
+		 * been canceled. Simply drop the reference. Cancellation itself
+		 * will remove it from the set of pending requests.
+		 */
+
+		WARN_ON(status != -EINVAL);
+
+		ssh_request_put(rqst);
+		return -EAGAIN;
+	}
+
+	ssh_request_put(rqst);
+	return 0;
+}
+
+static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
+{
+	if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
+		return false;
+
+	if (ssh_rtl_queue_empty(rtl))
+		return false;
+
+	return schedule_work(&rtl->tx.work);
+}
+
+static void ssh_rtl_tx_work_fn(struct work_struct *work)
+{
+	struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
+	int i, status;
+
+	/*
+	 * Try to be nice and not block the workqueue: Run a maximum of 10
+	 * tries, then re-submit if necessary. This should not be necessary,
+	 * for normal execution, but guarantee it anyway.
+	 */
+	for (i = 0; i < 10; i++) {
+		status = ssh_rtl_tx_try_process_one(rtl);
+		if (status == -ENOENT || status == -EBUSY)
+			return; // no more requests to process
+
+		if (status == -ESHUTDOWN) {
+			/*
+			 * Packet system shutting down. No new packets can be
+			 * transmitted. Return silently, the party initiating
+			 * the shutdown should handle the rest.
+			 */
+			return;
+		}
+
+		WARN_ON(status != 0 && status != -EAGAIN);
+	}
+
+	// out of tries, reschedule
+	ssh_rtl_tx_schedule(rtl);
+}
+
+
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
+{
+	trace_ssam_request_submit(rqst);
+
+	/*
+	 * Ensure that requests expecting a response are sequenced. If this
+	 * invariant ever changes, see the comment in ssh_rtl_complete on what
+	 * is required to be changed in the code.
+	 */
+	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
+		if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
+			return -EINVAL;
+
+	// try to set ptl and check if this request has already been submitted
+	if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl) != NULL)
+		return -EALREADY;
+
+	spin_lock(&rtl->queue.lock);
+
+	if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
+		spin_unlock(&rtl->queue.lock);
+		return -ESHUTDOWN;
+	}
+
+	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
+		spin_unlock(&rtl->queue.lock);
+		return -EINVAL;
+	}
+
+	set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
+	list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
+
+	spin_unlock(&rtl->queue.lock);
+
+	ssh_rtl_tx_schedule(rtl);
+	return 0;
+}
+
+static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
+				       ktime_t expires)
+{
+	unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
+	ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
+	ktime_t old;
+
+	// move reaper up only if new expiry undercuts the current one by more than the resolution
+	old = READ_ONCE(rtl->rtx_timeout.expires);
+	while (ktime_before(aexp, old))
+		old = cmpxchg64(&rtl->rtx_timeout.expires, old, expires);
+
+	// if we updated the reaper expiration, modify work timeout
+	if (old == expires)
+		mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
+}
+
+static void ssh_rtl_timeout_start(struct ssh_request *rqst)
+{
+	struct ssh_rtl *rtl = ssh_request_rtl(rqst);
+	ktime_t timestamp = ktime_get_coarse_boottime();
+	ktime_t timeout = rtl->rtx_timeout.timeout;
+
+	if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
+		return;
+
+	WRITE_ONCE(rqst->timestamp, timestamp);
+	/*
+	 * Ensure timestamp is set before starting the reaper. Paired with
+	 * implicit barrier following check on ssh_request_get_expiration in
+	 * ssh_rtl_timeout_reap.
+	 */
+	smp_mb__after_atomic();
+
+	ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
+}
+
+
+static void ssh_rtl_complete(struct ssh_rtl *rtl,
+			     const struct ssh_command *command,
+			     const struct ssam_span *command_data)
+{
+	struct ssh_request *r = NULL;
+	struct ssh_request *p, *n;
+	u16 rqid = get_unaligned_le16(&command->rqid);
+
+	trace_ssam_rx_response_received(command, command_data->len);
+
+	/*
+	 * Get request from pending based on request ID and mark it as response
+	 * received and locked.
+	 */
+	spin_lock(&rtl->pending.lock);
+	list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
+		// we generally expect requests to be processed in order
+		if (unlikely(ssh_request_get_rqid(p) != rqid))
+			continue;
+
+		// simulate response timeout
+		if (ssh_rtl_should_drop_response()) {
+			spin_unlock(&rtl->pending.lock);
+
+			trace_ssam_ei_rx_drop_response(p);
+			rtl_info(rtl, "request error injection: dropping response for request %p\n",
+				 &p->packet);
+			return;
+		}
+
+		/*
+		 * Mark as "response received" and "locked" as we're going to
+		 * complete it.
+		 */
+		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
+		set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
+		// ensure state never gets zero
+		smp_mb__before_atomic();
+		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
+
+		atomic_dec(&rtl->pending.count);
+		list_del(&p->node);
+
+		r = p;
+		break;
+	}
+	spin_unlock(&rtl->pending.lock);
+
+	if (!r) {
+		rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = 0x%04x)\n",
+			 rqid);
+		return;
+	}
+
+	// if the request has already been completed (e.g. canceled), just drop our reference
+	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
+		ssh_request_put(r);
+		ssh_rtl_tx_schedule(rtl);
+		return;
+	}
+
+	/*
+	 * Make sure the request has been transmitted. In case of a sequenced
+	 * request, we are guaranteed that the completion callback will run on
+	 * the receiver thread directly when the ACK for the packet has been
+	 * received. Similarly, this function is guaranteed to run on the
+	 * receiver thread. Thus we are guaranteed that if the packet has been
+	 * successfully transmitted and received an ACK, the transmitted flag
+	 * has been set and is visible here.
+	 *
+	 * We are currently not handling unsequenced packets here, as those
+	 * should never expect a response as ensured in ssh_rtl_submit. If this
+	 * ever changes, one would have to test for
+	 *
+	 *	(r->state & (transmitting | transmitted))
+	 *
+	 * on unsequenced packets to determine if they could have been
+	 * transmitted. There are no synchronization guarantees as in the
+	 * sequenced case, since, in this case, the callback function will not
+	 * run on the same thread. Thus an exact determination is impossible.
+	 */
+	if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
+		rtl_err(rtl, "rtl: received response before ACK for request (rqid = 0x%04x)\n",
+			rqid);
+
+		/*
+		 * NB: Timeout has already been canceled, request already been
+		 * removed from pending and marked as locked and completed. As
+		 * we receive a "false" response, the packet might still be
+		 * queued though.
+		 */
+		ssh_rtl_queue_remove(r);
+
+		ssh_rtl_complete_with_status(r, -EREMOTEIO);
+		ssh_request_put(r);
+
+		ssh_rtl_tx_schedule(rtl);
+		return;
+	}
+
+	/*
+	 * NB: Timeout has already been canceled, request already been
+	 * removed from pending and marked as locked and completed. The request
+	 * can also not be queued any more, as it has been marked as
+	 * transmitting and later transmitted. Thus no need to remove it from
+	 * anywhere.
+	 */
+
+	ssh_rtl_complete_with_rsp(r, command, command_data);
+	ssh_request_put(r);
+
+	ssh_rtl_tx_schedule(rtl);
+}
+
+
+static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
+{
+	struct ssh_rtl *rtl;
+	unsigned long state, fixed;
+	bool remove;
+
+	/*
+	 * Handle unsubmitted request: Try to mark the packet as locked,
+	 * expecting the state to be zero (i.e. unsubmitted). Note that, if
+	 * setting the state worked, we might still be adding the packet to the
+	 * queue in a currently executing submit call. In that case, however,
+	 * ptl reference must have been set previously, as locked is checked
+	 * after setting ptl. Thus only if we successfully lock this request and
+	 * ptl is NULL, we have successfully removed the request.
+	 * Otherwise we need to try and grab it from the queue.
+	 *
+	 * Note that if the CMPXCHG fails, we are guaranteed that ptl has
+	 * been set and is non-NULL, as states can only be nonzero after this
+	 * has been set. Also note that we need to fetch the static (type) flags
+	 * to ensure that they don't cause the cmpxchg to fail.
+	 */
+	fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
+	state = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
+	if (!state && !READ_ONCE(r->packet.ptl)) {
+		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			return true;
+
+		ssh_rtl_complete_with_status(r, -ECANCELED);
+		return true;
+	}
+
+	rtl = ssh_request_rtl(r);
+	spin_lock(&rtl->queue.lock);
+
+	/*
+	 * Note: 1) Requests cannot be re-submitted. 2) If a request is queued,
+	 * it cannot be "transmitting"/"pending" yet. Thus, if we successfully
+	 * remove the request here, we have removed all its occurrences in
+	 * the system.
+	 */
+
+	remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+	if (!remove) {
+		spin_unlock(&rtl->queue.lock);
+		return false;
+	}
+
+	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+	list_del(&r->node);
+
+	spin_unlock(&rtl->queue.lock);
+
+	ssh_request_put(r); // drop reference obtained from queue
+
+	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+		return true;
+
+	ssh_rtl_complete_with_status(r, -ECANCELED);
+	return true;
+}
+
+static bool ssh_rtl_cancel_pending(struct ssh_request *r)
+{
+	// if the request is already locked, it is going to be removed shortly
+	if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+		return true;
+
+	/*
+	 * Now that we have locked the packet, we have guaranteed that it can't
+	 * be added to the system any more. If rtl is zero, the locked
+	 * check in ssh_rtl_submit has not been run and any submission,
+	 * currently in progress or called later, won't add the packet. Thus we
+	 * can directly complete it.
+	 */
+	if (!ssh_request_rtl(r)) {
+		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			return true;
+
+		ssh_rtl_complete_with_status(r, -ECANCELED);
+		return true;
+	}
+
+	/*
+	 * Try to cancel the packet. If the packet has not been completed yet,
+	 * this will subsequently (and synchronously) call the completion
+	 * callback of the packet, which will complete the request.
+	 */
+	ssh_ptl_cancel(&r->packet);
+
+	/*
+	 * If the packet has been completed with success, i.e. has not been
+	 * canceled by the above call, the request may not have been completed
+	 * yet (may be waiting for a response). Check if we need to do this
+	 * here.
+	 */
+	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+		return true;
+
+	ssh_rtl_queue_remove(r);
+	ssh_rtl_pending_remove(r);
+	ssh_rtl_complete_with_status(r, -ECANCELED);
+
+	return true;
+}
+
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
+{
+	struct ssh_rtl *rtl;
+	bool canceled;
+
+	if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
+		return true;
+
+	trace_ssam_request_cancel(rqst);
+
+	if (pending)
+		canceled = ssh_rtl_cancel_pending(rqst);
+	else
+		canceled = ssh_rtl_cancel_nonpending(rqst);
+
+	// note: rtl may be NULL if request has not been submitted yet
+	rtl = ssh_request_rtl(rqst);
+	if (canceled && rtl)
+		ssh_rtl_tx_schedule(rtl);
+
+	return canceled;
+}
+
+
+static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
+{
+	struct ssh_request *r = to_ssh_request(p, packet);
+
+	if (unlikely(status)) {
+		set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+
+		if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			return;
+
+		/*
+		 * The packet may get canceled even though it has not been
+		 * submitted yet. The request may still be queued. Check the
+		 * queue and remove it if necessary. As the timeout would have
+		 * been started in this function on success, there's no need to
+		 * cancel it here.
+		 */
+		ssh_rtl_queue_remove(r);
+		ssh_rtl_pending_remove(r);
+		ssh_rtl_complete_with_status(r, status);
+
+		ssh_rtl_tx_schedule(ssh_request_rtl(r));
+		return;
+	}
+
+	// update state: mark as transmitted and clear transmitting
+	set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
+	// ensure state never gets zero
+	smp_mb__before_atomic();
+	clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
+
+	// if we expect a response, we just need to start the timeout
+	if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
+		ssh_rtl_timeout_start(r);
+		return;
+	}
+
+	/*
+	 * If we don't expect a response, lock, remove, and complete the
+	 * request. Note that, at this point, the request is guaranteed to have
+	 * left the queue and no timeout has been started. Thus we only need to
+	 * remove it from pending. If the request has already been completed (it
+	 * may have been canceled) return.
+	 */
+
+	set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+	if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+		return;
+
+	ssh_rtl_pending_remove(r);
+	ssh_rtl_complete_with_status(r, 0);
+
+	ssh_rtl_tx_schedule(ssh_request_rtl(r));
+}
+
+
+static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeo)
+{
+	ktime_t timestamp = READ_ONCE(r->timestamp);
+
+	if (timestamp != KTIME_MAX)
+		return ktime_add(timestamp, timeo);
+	else
+		return KTIME_MAX; // timeout has not been started yet
+}
+
+static void ssh_rtl_timeout_reap(struct work_struct *work)
+{
+	struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
+	struct ssh_request *r, *n;
+	LIST_HEAD(claimed);
+	ktime_t now = ktime_get_coarse_boottime();
+	ktime_t timeout = rtl->rtx_timeout.timeout;
+	ktime_t next = KTIME_MAX;
+
+	trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count));
+
+	/*
+	 * Mark reaper as "not pending". This is done before checking any
+	 * requests to avoid lost-update type problems.
+	 */
+	WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX);
+	/*
+	 * Ensure that the reaper is marked as deactivated before we continue
+	 * checking requests to prevent lost-update problems when a request is
+	 * added to the pending set and ssh_rtl_timeout_reaper_mod is called
+	 * during execution of the part below.
+	 */
+	smp_mb__after_atomic();
+
+	spin_lock(&rtl->pending.lock);
+	list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+		ktime_t expires = ssh_request_get_expiration(r, timeout);
+
+		/*
+		 * Check if the timeout hasn't expired yet. Find out next
+		 * expiration date to be handled after this run.
+		 */
+		if (ktime_after(expires, now)) {
+			next = ktime_before(expires, next) ? expires : next;
+			continue;
+		}
+
+		// avoid further transitions if locked
+		if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
+			continue;
+
+		/*
+		 * We have now marked the packet as locked. Thus it cannot be
+		 * added to the pending or queued lists again after we've
+		 * removed it here. We can therefore re-use the node of this
+		 * packet temporarily.
+		 */
+
+		clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+		atomic_dec(&rtl->pending.count);
+		list_del(&r->node);
+
+		list_add_tail(&r->node, &claimed);
+	}
+	spin_unlock(&rtl->pending.lock);
+
+	// cancel and complete the claimed requests
+	list_for_each_entry_safe(r, n, &claimed, node) {
+		trace_ssam_request_timeout(r);
+
+		/*
+		 * At this point we've removed the packet from pending. This
+		 * means that we've obtained the last (only) reference of the
+		 * system to it. Thus we can just complete it.
+		 */
+		if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+			ssh_rtl_complete_with_status(r, -ETIMEDOUT);
+
+		// drop the reference we've obtained by removing it from pending
+		list_del(&r->node);
+		ssh_request_put(r);
+	}
+
+	// ensure that reaper doesn't run again immediately
+	next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
+	if (next != KTIME_MAX)
+		ssh_rtl_timeout_reaper_mod(rtl, now, next);
+
+	ssh_rtl_tx_schedule(rtl);
+}
+
+
+static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+			     const struct ssam_span *data)
+{
+	trace_ssam_rx_event_received(cmd, data->len);
+
+	rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n",
+		get_unaligned_le16(&cmd->rqid));
+
+	rtl->ops.handle_event(rtl, cmd, data);
+}
+
+static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
+{
+	struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
+	struct device *dev = &p->serdev->dev;
+	struct ssh_command *command;
+	struct ssam_span command_data;
+
+	if (sshp_parse_command(dev, data, &command, &command_data))
+		return; // invalid command payload, error already logged by the parser
+
+	if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
+		ssh_rtl_rx_event(rtl, command, &command_data);
+	else
+		ssh_rtl_complete(rtl, command, &command_data);
+}
+
+static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
+{
+	switch (data->ptr[0]) {
+	case SSH_PLD_TYPE_CMD:
+		ssh_rtl_rx_command(p, data);
+		break;
+
+	default:
+		ptl_err(p, "rtl: rx: unknown frame payload type (type: 0x%02x)\n",
+			data->ptr[0]);
+		break;
+	}
+}
+
+
+bool ssh_rtl_tx_flush(struct ssh_rtl *rtl)
+{
+	return flush_work(&rtl->tx.work); // wait for a queued/running transmitter work item
+}
+
+int ssh_rtl_rx_start(struct ssh_rtl *rtl)
+{
+	return ssh_ptl_rx_start(&rtl->ptl);
+}
+
+int ssh_rtl_tx_start(struct ssh_rtl *rtl)
+{
+	int status;
+	bool sched;
+
+	status = ssh_ptl_tx_start(&rtl->ptl);
+	if (status)
+		return status;
+
+	/*
+	 * If the packet layer has been shut down and restarted without shutting
+	 * down the request layer, there may still be requests queued and not
+	 * handled.
+	 */
+	spin_lock(&rtl->queue.lock);
+	sched = !list_empty(&rtl->queue.head);
+	spin_unlock(&rtl->queue.lock);
+
+	if (sched)
+		ssh_rtl_tx_schedule(rtl);
+
+	return 0;
+}
+
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+		 const struct ssh_rtl_ops *ops)
+{
+	struct ssh_ptl_ops ptl_ops;
+	int status;
+
+	ptl_ops.data_received = ssh_rtl_rx_data; // NOTE(review): only member assigned; confirm ssh_ptl_ops has no other fields left uninitialized
+
+	status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
+	if (status)
+		return status;
+
+	spin_lock_init(&rtl->queue.lock);
+	INIT_LIST_HEAD(&rtl->queue.head);
+
+	spin_lock_init(&rtl->pending.lock);
+	INIT_LIST_HEAD(&rtl->pending.head);
+	atomic_set_release(&rtl->pending.count, 0);
+
+	INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
+
+	rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
+	rtl->rtx_timeout.expires = KTIME_MAX;
+	INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
+
+	rtl->ops = *ops;
+
+	return 0;
+}
+
+void ssh_rtl_destroy(struct ssh_rtl *rtl)
+{
+	ssh_ptl_destroy(&rtl->ptl);
+}
+
+
+static void ssh_rtl_packet_release(struct ssh_packet *p)
+{
+ struct ssh_request *rqst;
+
+ rqst = to_ssh_request(p, packet);
+ rqst->ops->release(rqst);
+}
+
+static const struct ssh_packet_ops ssh_rtl_packet_ops = {
+ .complete = ssh_rtl_packet_callback,
+ .release = ssh_rtl_packet_release,
+};
+
+void ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops)
+{
+ struct ssh_packet_args packet_args;
+
+ packet_args.type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
+ if (!(flags & SSAM_REQUEST_UNSEQUENCED))
+ packet_args.type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
+
+ packet_args.priority = SSH_PACKET_PRIORITY(DATA, 0);
+ packet_args.ops = &ssh_rtl_packet_ops;
+
+ ssh_packet_init(&rqst->packet, &packet_args);
+ INIT_LIST_HEAD(&rqst->node);
+
+ rqst->state = 0;
+ if (flags & SSAM_REQUEST_HAS_RESPONSE)
+ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
+
+ rqst->timestamp = KTIME_MAX;
+ rqst->ops = ops;
+}
+
+
+struct ssh_flush_request {
+ struct ssh_request base;
+ struct completion completion;
+ int status;
+};
+
+static void ssh_rtl_flush_request_complete(struct ssh_request *r,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data,
+ int status)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ rqst->status = status;
+}
+
+static void ssh_rtl_flush_request_release(struct ssh_request *r)
+{
+ struct ssh_flush_request *rqst;
+
+ rqst = container_of(r, struct ssh_flush_request, base);
+ complete_all(&rqst->completion);
+}
+
+static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
+ .complete = ssh_rtl_flush_request_complete,
+ .release = ssh_rtl_flush_request_release,
+};
+
+/**
+ * ssh_rtl_flush() - Flush the request transmission layer.
+ * @rtl: request transmission layer
+ * @timeout: timeout for the flush operation in jiffies
+ *
+ * Queue a special flush request and wait for its completion. This request
+ * will be completed after all other currently queued and pending requests
+ * have been completed. Instead of a normal data packet, this request submits
+ * a special flush packet, meaning that upon completion, also the underlying
+ * packet transmission layer has been flushed.
+ *
+ * Flushing the request layer guarantees that all previously submitted
+ * requests have been fully completed before this call returns. Additionally,
+ * flushing blocks execution of all later submitted requests until the flush
+ * has been completed.
+ *
+ * If the caller ensures that no new requests are submitted after a call to
+ * this function, the request transmission layer is guaranteed to have no
+ * remaining requests when this call returns. The same guarantee does not hold
+ * for the packet layer, on which control packets may still be queued after
+ * this call.
+ *
+ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
+ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
+ * and/or request transmission layer has been shut down before this call. May
+ * also return %-EINTR if the underlying packet transmission has been
+ * interrupted.
+ */
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
+{
+ const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
+ struct ssh_flush_request rqst;
+ int status;
+
+ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
+ rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
+ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
+ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
+
+ init_completion(&rqst.completion);
+
+ status = ssh_rtl_submit(rtl, &rqst.base);
+ if (status)
+ return status;
+
+ ssh_request_put(&rqst.base);
+
+ if (wait_for_completion_timeout(&rqst.completion, timeout))
+ return 0;
+
+ ssh_rtl_cancel(&rqst.base, true);
+ wait_for_completion(&rqst.completion);
+
+ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED
+ && rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
+
+ return rqst.status == -ECANCELED ? -ETIMEDOUT : status;
+}
+
+
+void ssh_rtl_shutdown(struct ssh_rtl *rtl)
+{
+ struct ssh_request *r, *n;
+ LIST_HEAD(claimed);
+ int pending;
+
+ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
+ /*
+ * Ensure that the layer gets marked as shut-down before actually
+ * stopping it. In combination with the check in ssh_rtl_submit, this
+ * guarantees that no new requests can be added and all already queued
+ * requests are properly cancelled.
+ */
+ smp_mb__after_atomic();
+
+ // remove requests from queue
+ spin_lock(&rtl->queue.lock);
+ list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ // ensure state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
+
+ list_del(&r->node);
+ list_add_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->queue.lock);
+
+ /*
+ * We have now guaranteed that the queue is empty and no more new
+ * requests can be submitted (i.e. it will stay empty). This means that
+ * calling ssh_rtl_tx_schedule will not schedule tx.work any more. So we
+ * can simply call cancel_work_sync on tx.work here and when that
+ * returns, we've locked it down. This also means that after this call,
+ * we don't submit any more packets to the underlying packet layer, so
+ * we can also shut that down.
+ */
+
+ cancel_work_sync(&rtl->tx.work);
+ ssh_ptl_shutdown(&rtl->ptl);
+ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
+
+ /*
+ * Shutting down the packet layer should also have canceled all requests.
+ * Thus the pending set should be empty. Attempt to handle this
+ * gracefully anyways, even though this should be dead code.
+ */
+
+ pending = atomic_read(&rtl->pending.count);
+ if (WARN_ON(pending)) {
+ spin_lock(&rtl->pending.lock);
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
+ // ensure state never gets zero
+ smp_mb__before_atomic();
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
+
+ list_del(&r->node);
+ list_add_tail(&r->node, &claimed);
+ }
+ spin_unlock(&rtl->pending.lock);
+ }
+
+ // finally cancel and complete requests
+ list_for_each_entry_safe(r, n, &claimed, node) {
+ // test_and_set because we still might compete with cancellation
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
+ ssh_rtl_complete_with_status(r, -ESHUTDOWN);
+
+ // drop the reference we've obtained by removing it from list
+ list_del(&r->node);
+ ssh_request_put(r);
+ }
+}
diff --git a/drivers/misc/surface_sam/ssh_request_layer.h b/drivers/misc/surface_sam/ssh_request_layer.h
new file mode 100644
index 0000000000000..ffbb43719f853
--- /dev/null
+++ b/drivers/misc/surface_sam/ssh_request_layer.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _SSAM_SSH_REQUEST_LAYER_H
+#define _SSAM_SSH_REQUEST_LAYER_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/surface_aggregator_module.h>
+
+#include "ssh_packet_layer.h"
+
+
+enum ssh_rtl_state_flags {
+ SSH_RTL_SF_SHUTDOWN_BIT,
+};
+
+struct ssh_rtl_ops {
+ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
+ const struct ssam_span *data);
+};
+
+struct ssh_rtl {
+ struct ssh_ptl ptl;
+ unsigned long state;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ } queue;
+
+ struct {
+ spinlock_t lock;
+ struct list_head head;
+ atomic_t count;
+ } pending;
+
+ struct {
+ struct work_struct work;
+ } tx;
+
+ struct {
+ ktime_t timeout;
+ ktime_t expires;
+ struct delayed_work reaper;
+ } rtx_timeout;
+
+ struct ssh_rtl_ops ops;
+};
+
+#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
+#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
+
+#define to_ssh_rtl(ptr, member) \
+ container_of(ptr, struct ssh_rtl, member)
+
+static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
+{
+ return ssh_ptl_get_device(&rtl->ptl);
+}
+
+static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
+{
+ struct ssh_ptl *ptl;
+
+ ptl = READ_ONCE(rqst->packet.ptl);
+ return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
+}
+
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
+
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
+ const struct ssh_rtl_ops *ops);
+
+bool ssh_rtl_tx_flush(struct ssh_rtl *rtl);
+int ssh_rtl_rx_start(struct ssh_rtl *rtl);
+int ssh_rtl_tx_start(struct ssh_rtl *rtl);
+
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
+void ssh_rtl_shutdown(struct ssh_rtl *rtl);
+void ssh_rtl_destroy(struct ssh_rtl *rtl);
+
+void ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
+ const struct ssh_request_ops *ops);
+
+#endif /* _SSAM_SSH_REQUEST_LAYER_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8a84f11bf1246..838b33d7ed17a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -834,4 +834,21 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
+/* Surface System Aggregator Module */
+
+#define SSAM_MATCH_TARGET 0x1
+#define SSAM_MATCH_INSTANCE 0x2
+#define SSAM_MATCH_FUNCTION 0x4
+
+struct ssam_device_id {
+ __u8 match_flags;
+
+ __u8 category;
+ __u8 target;
+ __u8 instance;
+ __u8 function;
+
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/surface_aggregator_module.h b/include/linux/surface_aggregator_module.h
new file mode 100644
index 0000000000000..a02d4996a604c
--- /dev/null
+++ b/include/linux/surface_aggregator_module.h
@@ -0,0 +1,1006 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Interface for Surface System Aggregator Module (SSAM) via Surface Serial
+ * Hub (SSH).
+ *
+ * The Surface Serial Hub (SSH) is the main communication hub for
+ * communication between host and the Surface/System Aggregator Module (SSAM),
+ * an embedded controller on newer Microsoft Surface devices (Book 2, Pro 5,
+ * Laptops, and later). Also referred to as SAM-over-SSH. Older devices (Book
+ * 1, Pro 4) use SAM-over-HID (via I2C), which this driver does not support.
+ */
+
+#ifndef _SURFACE_AGGREGATOR_MODULE_H
+#define _SURFACE_AGGREGATOR_MODULE_H
+
+#include <linux/completion.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/uuid.h>
+
+
+/* -- Data structures for SAM-over-SSH communication. ----------------------- */
+
+/**
+ * enum ssh_frame_type - Frame types for SSH frames.
+ *
+ * @SSH_FRAME_TYPE_DATA_SEQ:
+ * Indicates a data frame, followed by a payload with the length specified
+ * in the ssh_frame.len field. This frame is sequenced, meaning that an ACK
+ * is required.
+ *
+ * @SSH_FRAME_TYPE_DATA_NSQ:
+ * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
+ * message does not have to be ACKed.
+ *
+ * @SSH_FRAME_TYPE_ACK:
+ * Indicates an ACK message.
+ *
+ * @SSH_FRAME_TYPE_NAK:
+ * Indicates an error response for previously sent frame. In general, this
+ * means that the frame and/or payload is malformed, e.g. a CRC is wrong.
+ * For command-type payloads, this can also mean that the command is
+ * invalid.
+ */
+enum ssh_frame_type {
+ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
+ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
+ SSH_FRAME_TYPE_ACK = 0x40,
+ SSH_FRAME_TYPE_NAK = 0x04,
+};
+
+/**
+ * struct ssh_frame - SSH communication frame.
+ * @type: The type of the frame. See &enum ssh_frame_type.
+ * @len: The length of the frame payload directly following the CRC for this
+ * frame. Does not include the final CRC for that payload.
+ * @seq: The sequence number for this message/exchange.
+ */
+struct ssh_frame {
+ u8 type;
+ __le16 len;
+ u8 seq;
+} __packed;
+
+static_assert(sizeof(struct ssh_frame) == 4);
+
+/*
+ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
+
+/**
+ * enum ssh_payload_type - Type indicator for the SSH payload.
+ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
+ * payload.
+ */
+enum ssh_payload_type {
+ SSH_PLD_TYPE_CMD = 0x80,
+};
+
+/**
+ * struct ssh_command - Payload of a command-type frame.
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
+ * SSH_PLD_TYPE_CMD for this struct.
+ * @tc: Command target category.
+ * @tid_out: Output target ID. Should be zero if this an incoming (EC to host)
+ * message.
+ * @tid_in: Input target ID. Should be zero if this is an outgoing (host to
+ * EC) message.
+ * @iid: Instance ID.
+ * @rqid: Request ID. Used to match requests with responses and differentiate
+ * between responses and events.
+ * @cid: Command ID.
+ */
+struct ssh_command {
+ u8 type;
+ u8 tc;
+ u8 tid_out;
+ u8 tid_in;
+ u8 iid;
+ __le16 rqid;
+ u8 cid;
+} __packed;
+
+static_assert(sizeof(struct ssh_command) == 8);
+
+/*
+ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
+ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
+
+/**
+ * struct ssh_notification_params - Command payload to enable/disable SSH
+ * notifications.
+ * @target_category: The target category for which notifications should be
+ * enabled/disabled.
+ * @flags: Flags determining how notifications are being sent.
+ * @request_id: The request ID that is used to send these notifications.
+ * @instance_id: The specific instance in the given target category for
+ * which notifications should be enabled.
+ */
+struct ssh_notification_params {
+ u8 target_category;
+ u8 flags;
+ __le16 request_id;
+ u8 instance_id;
+} __packed;
+
+static_assert(sizeof(struct ssh_notification_params) == 5);
+
+/*
+ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
+ *
+ * This is the minimum number of bytes required to form a message. The actual
+ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
+ */
+#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
+
+/*
+ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
+ *
+ * This is the length of a SSH control message, which is equal to a SSH
+ * message without any payload.
+ */
+#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
+
+/**
+ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
+ * @payload_size: Length of the payload inside the SSH frame.
+ *
+ * Return: Returns the length of a SSH message with payload of specified size.
+ */
+#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size)
+
+/**
+ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
+ * @payload_size: Length of the command payload.
+ *
+ * Return: Returns the length of a SSH command message with command payload of
+ * specified size.
+ */
+#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
+ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size)
+
+/**
+ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
+ * frame.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_frame field in the
+ * raw SSH message data.
+ */
+#define SSH_MSGOFFSET_FRAME(field) \
+ (sizeof(u16) + offsetof(struct ssh_frame, field))
+
+/**
+ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field in
+ * command.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_command field in
+ * the raw SSH message data.
+ */
+#define SSH_MSGOFFSET_COMMAND(field) \
+ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
+ + offsetof(struct ssh_command, field))
+
+/**
+ * struct ssam_span - Reference to a buffer region.
+ * @ptr: Pointer to the buffer region.
+ * @len: Length of the buffer region.
+ *
+ * A reference to a (non-owned) buffer segment, consisting of pointer and
+ * length. Use of this struct indicates non-owned data, i.e. data of which the
+ * life-time is managed (i.e. it is allocated/freed) via another pointer.
+ */
+struct ssam_span {
+ u8 *ptr;
+ size_t len;
+};
+
+
+/* -- Packet transport layer (ptl). ----------------------------------------- */
+
+/**
+ * enum ssh_packet_priority - Base priorities for &struct ssh_packet.
+ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
+ * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets.
+ * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets.
+ * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets.
+ */
+enum ssh_packet_priority {
+ SSH_PACKET_PRIORITY_FLUSH = 0,
+ SSH_PACKET_PRIORITY_DATA = 0,
+ SSH_PACKET_PRIORITY_NAK = 1 << 4,
+ SSH_PACKET_PRIORITY_ACK = 2 << 4,
+};
+
+/**
+ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
+ * number of tries.
+ * @base: The base priority as suffix of &enum ssh_packet_priority, e.g.
+ * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
+ * @try: The number of tries (must be less than 16).
+ *
+ * Compute the combined packet priority. The combined priority is dominated by
+ * the base priority, whereas the number of (re-)tries decides the precedence
+ * of packets with the same base priority, giving higher priority to packets
+ * that already have more tries.
+ *
+ * Return: Returns the computed priority as value fitting inside a &u8. A
+ * higher number means a higher priority.
+ */
+#define SSH_PACKET_PRIORITY(base, try) \
+ ((SSH_PACKET_PRIORITY_##base) | ((try) & 0x0f))
+
+/**
+ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
+ * @p: The packet priority.
+ *
+ * Return: Returns the number of tries encoded in the specified packet
+ * priority.
+ */
+#define ssh_packet_priority_get_try(p) ((p) & 0x0f)
+
+
+enum ssh_packet_flags {
+ /* state flags */
+ SSH_PACKET_SF_LOCKED_BIT,
+ SSH_PACKET_SF_QUEUED_BIT,
+ SSH_PACKET_SF_PENDING_BIT,
+ SSH_PACKET_SF_TRANSMITTING_BIT,
+ SSH_PACKET_SF_TRANSMITTED_BIT,
+ SSH_PACKET_SF_ACKED_BIT,
+ SSH_PACKET_SF_CANCELED_BIT,
+ SSH_PACKET_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_PACKET_TY_FLUSH_BIT,
+ SSH_PACKET_TY_SEQUENCED_BIT,
+ SSH_PACKET_TY_BLOCKING_BIT,
+
+ /* mask for state flags */
+ SSH_PACKET_FLAGS_SF_MASK =
+ BIT(SSH_PACKET_SF_LOCKED_BIT)
+ | BIT(SSH_PACKET_SF_QUEUED_BIT)
+ | BIT(SSH_PACKET_SF_PENDING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
+ | BIT(SSH_PACKET_SF_ACKED_BIT)
+ | BIT(SSH_PACKET_SF_CANCELED_BIT)
+ | BIT(SSH_PACKET_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_PACKET_FLAGS_TY_MASK =
+ BIT(SSH_PACKET_TY_FLUSH_BIT)
+ | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
+ | BIT(SSH_PACKET_TY_BLOCKING_BIT),
+};
+
+
+struct ssh_ptl;
+struct ssh_packet;
+
+struct ssh_packet_ops {
+ void (*release)(struct ssh_packet *p);
+ void (*complete)(struct ssh_packet *p, int status);
+};
+
+struct ssh_packet {
+ struct ssh_ptl *ptl;
+ struct kref refcnt;
+
+ u8 priority;
+
+ struct {
+ size_t len;
+ u8 *ptr;
+ } data;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ struct list_head queue_node;
+ struct list_head pending_node;
+
+ const struct ssh_packet_ops *ops;
+};
+
+#define to_ssh_packet(ptr, member) \
+ container_of(ptr, struct ssh_packet, member)
+
+
+static inline struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
+{
+ kref_get(&packet->refcnt);
+ return packet;
+}
+
+void ssh_packet_put(struct ssh_packet *p);
+
+static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
+{
+ p->data.ptr = ptr;
+ p->data.len = len;
+}
+
+
+/* -- Request transport layer (rtl). ---------------------------------------- */
+
+enum ssh_request_flags {
+ SSH_REQUEST_SF_LOCKED_BIT,
+ SSH_REQUEST_SF_QUEUED_BIT,
+ SSH_REQUEST_SF_PENDING_BIT,
+ SSH_REQUEST_SF_TRANSMITTING_BIT,
+ SSH_REQUEST_SF_TRANSMITTED_BIT,
+ SSH_REQUEST_SF_RSPRCVD_BIT,
+ SSH_REQUEST_SF_CANCELED_BIT,
+ SSH_REQUEST_SF_COMPLETED_BIT,
+
+ SSH_REQUEST_TY_FLUSH_BIT,
+ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
+
+ SSH_REQUEST_FLAGS_SF_MASK =
+ BIT(SSH_REQUEST_SF_LOCKED_BIT)
+ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
+ | BIT(SSH_REQUEST_SF_PENDING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
+ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
+ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
+ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
+
+ SSH_REQUEST_FLAGS_TY_MASK =
+ BIT(SSH_REQUEST_TY_FLUSH_BIT)
+ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
+};
+
+
+struct ssh_rtl;
+struct ssh_request;
+
+struct ssh_request_ops {
+ void (*release)(struct ssh_request *rqst);
+ void (*complete)(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status);
+};
+
+struct ssh_request {
+ struct ssh_packet packet;
+ struct list_head node;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ const struct ssh_request_ops *ops;
+};
+
+#define to_ssh_request(ptr, member) \
+ container_of(ptr, struct ssh_request, member)
+
+
+static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
+{
+ ssh_packet_get(&r->packet);
+ return r;
+}
+
+static inline void ssh_request_put(struct ssh_request *r)
+{
+ ssh_packet_put(&r->packet);
+}
+
+static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
+{
+ ssh_packet_set_data(&r->packet, ptr, len);
+}
+
+
+/* -- Main data types and definitions --------------------------------------- */
+
+enum ssam_ssh_tc {
+ SSAM_SSH_TC_SAM = 0x01, // generic system functionality, real-time clock
+ SSAM_SSH_TC_BAT = 0x02, // battery/power subsystem
+ SSAM_SSH_TC_TMP = 0x03, // thermal subsystem
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, // legacy keyboard (Laptop 1/2)
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+ SSAM_SSH_TC_KIP = 0x0e,
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, // detachment system (Surface Book 2/3)
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, // generic HID input subsystem
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21,
+
+	SSAM_SSH_TC__HUB = 0x00,		// not an actual ID, used for hubs
+};
+
+struct ssam_controller;
+
+/**
+ * enum ssam_event_flags - Flags for enabling/disabling SAM-over-SSH events
+ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
+ */
+enum ssam_event_flags {
+ SSAM_EVENT_SEQUENCED = BIT(0),
+};
+
+struct ssam_event {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 length;
+ u8 data[0];
+};
+
+enum ssam_request_flags {
+ SSAM_REQUEST_HAS_RESPONSE = BIT(0),
+ SSAM_REQUEST_UNSEQUENCED = BIT(1),
+};
+
+struct ssam_request {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 flags;
+ u16 length;
+ const u8 *payload;
+};
+
+struct ssam_response {
+ size_t capacity;
+ size_t length;
+ u8 *pointer;
+};
+
+
+struct ssam_controller *ssam_get_controller(void);
+int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
+int ssam_client_bind(struct device *client, struct ssam_controller **ctrl);
+
+struct device *ssam_controller_device(struct ssam_controller *c);
+
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
+void ssam_controller_put(struct ssam_controller *c);
+
+void ssam_controller_statelock(struct ssam_controller *c);
+void ssam_controller_stateunlock(struct ssam_controller *c);
+
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ struct ssam_request *spec);
+
+
+/* -- Synchronous request interface. ---------------------------------------- */
+
+struct ssam_request_sync {
+ struct ssh_request base;
+ struct completion comp;
+ struct ssam_response *resp;
+ int status;
+};
+
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer);
+
+void ssam_request_sync_free(struct ssam_request_sync *rqst);
+
+void ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags);
+
+/**
+ * ssam_request_sync_set_data - Set message data of a synchronous request.
+ * @rqst: The request.
+ * @ptr: Pointer to the request message data.
+ * @len: Length of the request message data.
+ *
+ * Set the request message data of a synchronous request. The provided buffer
+ * needs to live until the request has been completed.
+ */
+static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
+ u8 *ptr, size_t len)
+{
+ ssh_request_set_data(&rqst->base, ptr, len);
+}
+
+/**
+ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
+ * @rqst: The request.
+ * @resp: The response buffer.
+ *
+ * Sets the response buffer of a synchronous request. This buffer will store
+ * the response of the request after it has been completed. May be %NULL if
+ * no response is expected.
+ */
+static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
+ struct ssam_response *resp)
+{
+ rqst->resp = resp;
+}
+
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst);
+
+/**
+ * ssam_request_sync_wait - Wait for completion of a synchronous request.
+ * @rqst: The request to wait for.
+ *
+ * Wait for completion and release of a synchronous request. After this
+ * function terminates, the request is guaranteed to have left the
+ * transmission system. After successful submission of a request, this
+ * function must be called before accessing the response of the request,
+ * freeing the request, or freeing any of the buffers associated with the
+ * request.
+ *
+ * Returns the status of the request.
+ */
+static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
+{
+ wait_for_completion(&rqst->comp);
+ return rqst->status;
+}
+
+int ssam_request_sync(struct ssam_controller *ctrl, struct ssam_request *spec,
+ struct ssam_response *rsp);
+
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf);
+
+
+/**
+ * ssam_request_sync_onstack - Execute a synchronous request on the stack.
+ * @ctrl: The controller via which the request is submitted.
+ * @rqst: The request specification.
+ * @rsp: The response buffer.
+ * @payload_len: The (maximum) request payload length.
+ *
+ * Allocates a synchronous request with specified payload length on the stack,
+ * fully initializes it via the provided request specification, submits it, and
+ * finally waits for its completion before returning its status. This helper
+ * macro essentially allocates the request message buffer on the stack and
+ * then calls ssam_request_sync_with_buffer().
+ *
+ * Note: The @payload_len parameter specifies the maximum payload length, used
+ * for buffer allocation. The actual payload length may be smaller.
+ *
+ * Returns the status of the request or any failure during setup.
+ */
+#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
+ ({ \
+ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
+ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
+ \
+ ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
+ })
+
+
+struct ssam_request_spec {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u8 flags;
+};
+
+struct ssam_request_spec_md {
+ u8 target_category;
+ u8 command_id;
+ u8 flags;
+};
+
+#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
+ int name(struct ssam_controller *ctrl) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+#define SSAM_DEFINE_SYNC_REQUEST_W(name, wtype, spec...) \
+ int name(struct ssam_controller *ctrl, const wtype *in) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(wtype); \
+ rqst.payload = (u8 *)in; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(wtype)); \
+ }
+
+#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
+ int name(struct ssam_controller *ctrl, rtype *out) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)out; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, "rqst: invalid response length, expected " \
+ "%zu, got %zu (tc: 0x%02x, cid: 0x%02x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, wtype, spec...) \
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const wtype *in) \
+ { \
+ struct ssam_request_spec_md s \
+ = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(wtype); \
+ rqst.payload = (u8 *)in; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(wtype)); \
+ }
+
+/*
+ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define a synchronous multi-device SSAM
+ * request function expecting a fixed-size response and taking no payload.
+ *
+ * Generates "int name(struct ssam_controller *ctrl, u8 tid, u8 iid,
+ * rtype *out)". Target category, command ID, and flags come from the
+ * struct ssam_request_spec_md initializer given in @spec (the
+ * SSAM_REQUEST_HAS_RESPONSE flag is forced on); target and instance IDs
+ * are supplied by the caller at runtime.
+ *
+ * On success the response is written to *out. Returns -EIO if the received
+ * response length differs from sizeof(rtype), otherwise the status of
+ * ssam_request_sync_onstack() (presumably zero on success, negative errno
+ * on failure -- confirm against its definition).
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...)		\
+	int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *out) \
+	{								\
+		struct ssam_request_spec_md s				\
+			= (struct ssam_request_spec_md)spec;		\
+		struct ssam_request rqst;				\
+		struct ssam_response rsp;				\
+		int status;						\
+									\
+		rqst.target_category = s.target_category;		\
+		rqst.target_id = tid;					\
+		rqst.command_id = s.command_id;				\
+		rqst.instance_id = iid;					\
+		rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE;	\
+		rqst.length = 0;					\
+		rqst.payload = NULL;					\
+									\
+		rsp.capacity = sizeof(rtype);				\
+		rsp.length = 0;						\
+		rsp.pointer = (u8 *)out;				\
+									\
+		status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+		if (status)						\
+			return status;					\
+									\
+		if (rsp.length != sizeof(rtype)) {			\
+			struct device *dev = ssam_controller_device(ctrl); \
+			dev_err(dev, "rqst: invalid response length, expected " \
+				"%zu, got %zu (tc: 0x%02x, cid: 0x%02x)", \
+				sizeof(rtype), rsp.length, rqst.target_category,\
+				rqst.command_id);			\
+			return -EIO;					\
+		}							\
+									\
+		return 0;						\
+	}
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+
+/*
+ * Notifier return values: the two low-order bits carry the flags below;
+ * the remaining bits hold an (absolute) errno value, encoded/decoded via
+ * ssam_notifier_from_errno() / ssam_notifier_to_errno().
+ */
+#define SSAM_NOTIF_STATE_SHIFT		2
+#define SSAM_NOTIF_STATE_MASK		((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
+
+/* The notifier has handled the event. */
+#define SSAM_NOTIF_HANDLED		BIT(0)
+/* Stop passing the event on to further notifiers in the chain. */
+#define SSAM_NOTIF_STOP			BIT(1)
+
+
+struct ssam_notifier_block;
+
+/* Callback type: receives the event, returns SSAM_NOTIF_* flags/state. */
+typedef u32 (*ssam_notifier_fn_t)(struct ssam_notifier_block *nb,
+				  const struct ssam_event *event);
+
+/*
+ * struct ssam_notifier_block - Entry in an (RCU-protected) notifier chain.
+ * @next:     Next block in the chain (managed by the chain implementation).
+ * @fn:       Callback invoked for incoming events.
+ * @priority: Ordering key -- presumably higher priority runs earlier;
+ *            confirm against the chain implementation in controller.c.
+ */
+struct ssam_notifier_block {
+	struct ssam_notifier_block __rcu *next;
+	ssam_notifier_fn_t fn;
+	int priority;
+};
+
+
+/*
+ * ssam_notifier_from_errno() - Encode an errno value as a notifier return
+ * value. Zero maps to zero (no flags); positive values are invalid and
+ * trigger a WARN_ON (also mapping to zero). A negative errno is stored,
+ * negated, above the flag bits with SSAM_NOTIF_STOP set so the chain stops.
+ */
+static inline u32 ssam_notifier_from_errno(int err)
+{
+	if (WARN_ON(err > 0) || err == 0)
+		return 0;
+	else
+		return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
+}
+
+/*
+ * ssam_notifier_to_errno() - Inverse of ssam_notifier_from_errno(): strip
+ * the flag bits and return the (negative) errno, or zero if none encoded.
+ */
+static inline int ssam_notifier_to_errno(u32 ret)
+{
+	return -(ret >> SSAM_NOTIF_STATE_SHIFT);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/*
+ * struct ssam_event_registry - Registry via which an event is enabled or
+ * disabled.
+ * @target_category: Target category of the registry requests.
+ * @target_id:       Target ID of the registry requests.
+ * @cid_enable:      Command ID of the event-enable request.
+ * @cid_disable:     Command ID of the event-disable request.
+ */
+struct ssam_event_registry {
+	u8 target_category;
+	u8 target_id;
+	u8 cid_enable;
+	u8 cid_disable;
+};
+
+/*
+ * struct ssam_event_id - Identifies an event (or event group) by its target
+ * category and instance ID.
+ */
+struct ssam_event_id {
+	u8 target_category;
+	u8 instance;
+};
+
+
+/* Compound-literal constructors for the two structs above. */
+#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis)	\
+	((struct ssam_event_registry) {			\
+		.target_category = (tc),		\
+		.target_id = (tid),			\
+		.cid_enable = (cid_en),			\
+		.cid_disable = (cid_dis),		\
+	})
+
+#define SSAM_EVENT_ID(tc, iid)				\
+	((struct ssam_event_id) {			\
+		.target_category = (tc),		\
+		.instance = (iid),			\
+	})
+
+
+/* Predefined registries (SAM, KIP, REG target categories). */
+#define SSAM_EVENT_REGISTRY_SAM	\
+	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
+
+#define SSAM_EVENT_REGISTRY_KIP	\
+	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
+
+#define SSAM_EVENT_REGISTRY_REG \
+	SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
+
+
+/*
+ * struct ssam_event_notifier - Notifier block bundled with the description
+ * of the event it listens for (registry, event ID, and event flags).
+ */
+struct ssam_event_notifier {
+	struct ssam_notifier_block base;
+
+	struct {
+		struct ssam_event_registry reg;
+		struct ssam_event_id id;
+		u8 flags;
+	} event;
+};
+
+/*
+ * Register/unregister an event notifier with the controller. Exact
+ * semantics (event enabling, reference counting of enabled events) are
+ * implemented elsewhere -- see the controller implementation.
+ */
+int ssam_notifier_register(struct ssam_controller *ctrl,
+			   struct ssam_event_notifier *n);
+
+int ssam_notifier_unregister(struct ssam_controller *ctrl,
+			     struct ssam_event_notifier *n);
+
+
+/* -- Surface System Aggregator Module Bus. --------------------------------- */
+
+/*
+ * struct ssam_device_uid - Unique identifier of a SSAM client device,
+ * composed of target category, target ID, instance ID, and function.
+ */
+struct ssam_device_uid {
+	u8 category;
+	u8 target;
+	u8 instance;
+	u8 function;
+};
+
+/*
+ * SSAM_DUID() - Build a ssam_device_uid; @__cat is token-pasted onto
+ * SSAM_SSH_TC_ to form the category constant.
+ */
+#define SSAM_DUID(__cat, __tid, __iid, __fun)		\
+	((struct ssam_device_uid) {			\
+		.category = SSAM_SSH_TC_##__cat,	\
+		.target = (__tid),			\
+		.instance = (__iid),			\
+		.function = (__fun)			\
+	})
+
+#define SSAM_DUID_NULL		((struct ssam_device_uid) { 0 })
+
+/*
+ * Wildcard markers for SSAM_DEVICE(). The value 0xffff is deliberately
+ * outside the u8 range of the UID fields, so it can never collide with a
+ * real ID; it is only ever compared against the macro arguments below.
+ */
+#define SSAM_ANY_TID		0xffff
+#define SSAM_ANY_IID		0xffff
+#define SSAM_ANY_FUN		0xffff
+
+/*
+ * SSAM_DEVICE() - Initializer fragment for struct ssam_device_id: sets a
+ * SSAM_MATCH_* flag for each non-wildcard field and zeroes wildcarded
+ * fields. Category is always matched.
+ */
+#define SSAM_DEVICE(__cat, __tid, __iid, __fun)				\
+	.match_flags = (((__tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0)	\
+		     | (((__iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0)	\
+		     | (((__fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0),	\
+	.category = SSAM_SSH_TC_##__cat,				\
+	.target   = ((__tid) != SSAM_ANY_TID) ? (__tid) : 0,		\
+	.instance = ((__iid) != SSAM_ANY_IID) ? (__iid) : 0,		\
+	.function = ((__fun) != SSAM_ANY_FUN) ? (__fun) : 0		\
+
+
+/*
+ * Byte-wise UID comparison. Safe with memcmp() since the struct consists
+ * of four u8 members and thus contains no padding.
+ */
+static inline bool ssam_device_uid_equal(const struct ssam_device_uid u1,
+					 const struct ssam_device_uid u2)
+{
+	return memcmp(&u1, &u2, sizeof(struct ssam_device_uid)) == 0;
+}
+
+/* True if all UID fields are zero (i.e. the UID equals SSAM_DUID_NULL). */
+static inline bool ssam_device_uid_is_null(const struct ssam_device_uid uid)
+{
+	return ssam_device_uid_equal(uid, (struct ssam_device_uid){});
+}
+
+
+/*
+ * struct ssam_device - Client device on the SSAM bus.
+ * @dev:  Embedded driver-model device.
+ * @ctrl: Controller through which this device communicates.
+ * @uid:  Unique identifier of this device (category/target/instance/function).
+ */
+struct ssam_device {
+	struct device dev;
+	struct ssam_controller *ctrl;
+
+	struct ssam_device_uid uid;
+};
+
+/*
+ * struct ssam_device_driver - Driver for SSAM client devices.
+ * @driver:      Embedded driver-model driver.
+ * @match_table: Device IDs this driver binds to.
+ * @probe:       Bind callback; returns zero on success.
+ * @remove:      Unbind callback (no return value).
+ */
+struct ssam_device_driver {
+	struct device_driver driver;
+
+	const struct ssam_device_id *match_table;
+
+	int (*probe)(struct ssam_device *sdev);
+	void (*remove)(struct ssam_device *sdev);
+};
+
+extern struct bus_type ssam_bus_type;
+extern const struct device_type ssam_device_type;
+
+
+/* True if @device is a SSAM client device (checked via its device_type). */
+static inline bool is_ssam_device(struct device *device)
+{
+	return device->type == &ssam_device_type;
+}
+
+/* Caller must ensure @d actually is a SSAM device (see is_ssam_device()). */
+static inline struct ssam_device *to_ssam_device(struct device *d)
+{
+	return container_of(d, struct ssam_device, dev);
+}
+
+static inline
+struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
+{
+	return container_of(d, struct ssam_device_driver, driver);
+}
+
+
+/* ID-table lookup helpers; implemented in the bus code. */
+const struct ssam_device_id *ssam_device_id_match(
+		const struct ssam_device_id *table,
+		const struct ssam_device_uid uid);
+
+const struct ssam_device_id *ssam_device_get_match(
+		const struct ssam_device *dev);
+
+const void *ssam_device_get_match_data(const struct ssam_device *dev);
+
+/* Device allocation and registration; see the bus implementation. */
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+				      struct ssam_device_uid uid);
+
+int ssam_device_add(struct ssam_device *sdev);
+void ssam_device_remove(struct ssam_device *sdev);
+
+/* Reference-count wrappers around the embedded struct device. */
+static inline void ssam_device_get(struct ssam_device *sdev)
+{
+	get_device(&sdev->dev);
+}
+
+static inline void ssam_device_put(struct ssam_device *sdev)
+{
+	put_device(&sdev->dev);
+}
+
+/* Driver-data accessors, forwarding to the embedded struct device. */
+static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
+{
+	return dev_get_drvdata(&sdev->dev);
+}
+
+static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
+{
+	dev_set_drvdata(&sdev->dev, data);
+}
+
+
+int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
+void ssam_device_driver_unregister(struct ssam_device_driver *d);
+
+/* Register a driver, attributing ownership to the calling module. */
+#define ssam_device_driver_register(drv) \
+	__ssam_device_driver_register(drv, THIS_MODULE)
+
+/* Generate module init/exit that (un)register the given driver. */
+#define module_ssam_device_driver(__drv) \
+	module_driver(__drv, ssam_device_driver_register, \
+		      ssam_device_driver_unregister)
+
+
+/* -- Helpers for client-device requests. ----------------------------------- */
+
+/*
+ * SSAM_DEFINE_SYNC_REQUEST_CL_W() / _CL_R() - Client-device variants of the
+ * multi-device request generators: they emit the raw multi-device function
+ * (as __raw_<name>) plus a wrapper taking a struct ssam_device, from which
+ * controller, target ID, and instance ID are derived.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, wtype, spec...)		\
+	SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, wtype, spec)	\
+	int name(struct ssam_device *sdev, const wtype *in)		\
+	{								\
+		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
+				    sdev->uid.instance, in);		\
+	}
+
+#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...)		\
+	SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec)	\
+	int name(struct ssam_device *sdev, rtype *out)			\
+	{								\
+		return __raw_##name(sdev->ctrl, sdev->uid.target,	\
+				    sdev->uid.instance, out);		\
+	}
+
+
+/*
+ * ssam_event_matches_device() - True if the event's target category, target
+ * ID, and instance ID match the given device UID. Note that the UID's
+ * function field is not part of the comparison.
+ */
+static inline bool ssam_event_matches_device(struct ssam_device_uid uid,
+					     const struct ssam_event *event)
+{
+	return uid.category == event->target_category
+		&& uid.target == event->target_id
+		&& uid.instance == event->instance_id;
+}
+
+#endif /* _SURFACE_AGGREGATOR_MODULE_H */
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index 010be8ba21160..ad40fb4462803 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -241,5 +241,12 @@ int main(void)
DEVID(mhi_device_id);
DEVID_FIELD(mhi_device_id, chan);
+ DEVID(ssam_device_id);
+ DEVID_FIELD(ssam_device_id, match_flags);
+ DEVID_FIELD(ssam_device_id, category);
+ DEVID_FIELD(ssam_device_id, target);
+ DEVID_FIELD(ssam_device_id, instance);
+ DEVID_FIELD(ssam_device_id, function);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 02d5d79da2844..a11335aefc998 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1362,6 +1362,27 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/*
+ * Looks like: ssam:cNtNiNfN
+ *
+ * N is exactly 2 digits, where each is an upper-case hex digit.
+ */
+static int do_ssam_entry(const char *filename, void *symval, char *alias)
+{
+	/* DEF_FIELD() declares a local holding the named struct member. */
+	DEF_FIELD(symval, ssam_device_id, match_flags);
+	DEF_FIELD(symval, ssam_device_id, category);
+	DEF_FIELD(symval, ssam_device_id, target);
+	DEF_FIELD(symval, ssam_device_id, instance);
+	DEF_FIELD(symval, ssam_device_id, function);
+
+	/*
+	 * Category is always part of the alias; the remaining fields are
+	 * appended by ADD() conditioned on the corresponding match flag
+	 * (presumably emitting a wildcard when unset -- see the ADD()
+	 * macro earlier in this file).
+	 */
+	sprintf(alias, "ssam:c%02X", category);
+	ADD(alias, "t", match_flags & SSAM_MATCH_TARGET, target);
+	ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
+	ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
+
+	return 1;
+}
+
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
{
@@ -1436,6 +1457,7 @@ static const struct devtable devtable[] = {
{"tee", SIZE_tee_client_device_id, do_tee_entry},
{"wmi", SIZE_wmi_device_id, do_wmi_entry},
{"mhi", SIZE_mhi_device_id, do_mhi_entry},
+ {"ssam", SIZE_ssam_device_id, do_ssam_entry},
};
/* Create MODULE_ALIAS() statements.
--
2.28.0