45540b9fab
Changes:
- Rebase onto v4.19.206
Links:
- kernel: 9c073cfc7c
20919 lines
659 KiB
Diff
20919 lines
659 KiB
Diff
From a3e9a6d1ac5954c21a42b7b252942bc08ddbaacc Mon Sep 17 00:00:00 2001
|
|
From: qzed <qzed@users.noreply.github.com>
|
|
Date: Mon, 26 Aug 2019 01:15:40 +0200
|
|
Subject: [PATCH] ACPI: Fix buffer/integer type mismatch
|
|
|
|
This is actually not a bug in the kernel, but rather Microsoft not
|
|
conforming with the ACPI specification.
|
|
|
|
Patchset: surface-sam
|
|
---
|
|
drivers/acpi/acpica/dsopcode.c | 2 +-
|
|
drivers/acpi/acpica/exfield.c | 26 ++++++++++----------------
|
|
2 files changed, 11 insertions(+), 17 deletions(-)
|
|
|
|
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
|
|
index 2f4641e5ecde..beb22d7e245e 100644
|
|
--- a/drivers/acpi/acpica/dsopcode.c
|
|
+++ b/drivers/acpi/acpica/dsopcode.c
|
|
@@ -123,7 +123,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
|
|
|
|
/* Offset is in bits, count is in bits */
|
|
|
|
- field_flags = AML_FIELD_ACCESS_BYTE;
|
|
+ field_flags = AML_FIELD_ACCESS_BUFFER;
|
|
bit_offset = offset;
|
|
bit_count = (u32) length_desc->integer.value;
|
|
|
|
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
|
|
index b272c329d45d..cf547883a993 100644
|
|
--- a/drivers/acpi/acpica/exfield.c
|
|
+++ b/drivers/acpi/acpica/exfield.c
|
|
@@ -102,6 +102,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
|
|
void *buffer;
|
|
u32 function;
|
|
u16 accessor_type;
|
|
+ u8 field_flags;
|
|
|
|
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
|
|
|
|
@@ -199,11 +200,16 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
|
|
* Note: Field.length is in bits.
|
|
*/
|
|
length =
|
|
- (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
|
|
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
|
|
+ field_flags = obj_desc->common_field.field_flags;
|
|
|
|
- if (length > acpi_gbl_integer_byte_width) {
|
|
+ if (length > acpi_gbl_integer_byte_width ||
|
|
+ (field_flags & AML_FIELD_ACCESS_TYPE_MASK) == AML_FIELD_ACCESS_BUFFER) {
|
|
|
|
- /* Field is too large for an Integer, create a Buffer instead */
|
|
+ /*
|
|
+ * Field is either too large for an Integer, or is actually of type
|
|
+ * buffer, so create a Buffer.
|
|
+ */
|
|
|
|
buffer_desc = acpi_ut_create_buffer_object(length);
|
|
if (!buffer_desc) {
|
|
@@ -366,19 +372,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
|
|
} else if (obj_desc->field.region_obj->region.space_id ==
|
|
ACPI_ADR_SPACE_GSBUS) {
|
|
accessor_type = obj_desc->field.attribute;
|
|
- length =
|
|
- acpi_ex_get_serial_access_length(accessor_type,
|
|
- obj_desc->field.
|
|
- access_length);
|
|
-
|
|
- /*
|
|
- * Add additional 2 bytes for the generic_serial_bus data buffer:
|
|
- *
|
|
- * Status; (Byte 0 of the data buffer)
|
|
- * Length; (Byte 1 of the data buffer)
|
|
- * Data[x-1]: (Bytes 2-x of the arbitrary length data buffer)
|
|
- */
|
|
- length += 2;
|
|
+ length = source_desc->buffer.length;
|
|
function = ACPI_WRITE | (accessor_type << 16);
|
|
} else { /* IPMI */
|
|
|
|
--
|
|
2.33.0
|
|
|
|
From e1162dd41c2e7a84804013cdc46a02f155da5047 Mon Sep 17 00:00:00 2001
|
|
From: Maximilian Luz <luzmaximilian@gmail.com>
|
|
Date: Tue, 24 Sep 2019 17:38:12 +0200
|
|
Subject: [PATCH] serdev: Add ACPI devices by ResourceSource field
|
|
|
|
When registering a serdev controller, ACPI needs to be checked for
|
|
devices attached to it. Currently, all immediate children of the ACPI
|
|
node of the controller are assumed to be UART client devices for this
|
|
controller. Furthermore, these devices are not searched elsewhere.
|
|
|
|
This is incorrect: Similar to SPI and I2C devices, the UART client
|
|
device definition (via UARTSerialBusV2) can reside anywhere in the ACPI
|
|
namespace as resource definition inside the _CRS method and points to
|
|
the controller via its ResourceSource field. This field may either
|
|
contain a fully qualified or relative path, indicating the controller
|
|
device. To address this, we need to walk over the whole ACPI namespace,
|
|
looking at each resource definition, and match the client device to the
|
|
controller via this field.
|
|
|
|
This patch is based on the existing acpi serial bus implementations in
|
|
drivers/i2c/i2c-core-acpi.c and drivers/spi/spi.c, specifically commit
|
|
4c3c59544f33e97cf8557f27e05a9904ead16363 ("spi/acpi: enumerate all SPI
|
|
slaves in the namespace").
|
|
|
|
Signed-off-by: Maximilian Luz <luzmaximilian@gmail.com>
|
|
Patchset: surface-sam
|
|
---
|
|
drivers/tty/serdev/core.c | 111 +++++++++++++++++++++++++++++++++-----
|
|
1 file changed, 99 insertions(+), 12 deletions(-)
|
|
|
|
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
|
|
index c66a04d24f1d..1b18d12d217f 100644
|
|
--- a/drivers/tty/serdev/core.c
|
|
+++ b/drivers/tty/serdev/core.c
|
|
@@ -496,16 +496,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
|
|
}
|
|
|
|
#ifdef CONFIG_ACPI
|
|
+
|
|
+#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
|
|
+
|
|
+struct acpi_serdev_lookup {
|
|
+ acpi_handle device_handle;
|
|
+ acpi_handle controller_handle;
|
|
+ int n;
|
|
+ int index;
|
|
+};
|
|
+
|
|
+static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
|
|
+{
|
|
+ struct acpi_serdev_lookup *lookup = data;
|
|
+ struct acpi_resource_uart_serialbus *sb;
|
|
+ acpi_status status;
|
|
+
|
|
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
|
|
+ return 1;
|
|
+
|
|
+ if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
|
|
+ return 1;
|
|
+
|
|
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
|
|
+ return 1;
|
|
+
|
|
+ sb = &ares->data.uart_serial_bus;
|
|
+
|
|
+ status = acpi_get_handle(lookup->device_handle,
|
|
+ sb->resource_source.string_ptr,
|
|
+ &lookup->controller_handle);
|
|
+ if (ACPI_FAILURE(status))
|
|
+ return 1;
|
|
+
|
|
+ /*
|
|
+ * NOTE: Ideally, we would also want to retrieve other properties here,
|
|
+ * once setting them before opening the device is supported by serdev.
|
|
+ */
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
+static int acpi_serdev_do_lookup(struct acpi_device *adev,
|
|
+ struct acpi_serdev_lookup *lookup)
|
|
+{
|
|
+ struct list_head resource_list;
|
|
+ int ret;
|
|
+
|
|
+ lookup->device_handle = acpi_device_handle(adev);
|
|
+ lookup->controller_handle = NULL;
|
|
+ lookup->n = 0;
|
|
+
|
|
+ INIT_LIST_HEAD(&resource_list);
|
|
+ ret = acpi_dev_get_resources(adev, &resource_list,
|
|
+ acpi_serdev_parse_resource, lookup);
|
|
+ acpi_dev_free_resource_list(&resource_list);
|
|
+
|
|
+ if (ret < 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
|
|
+ struct acpi_device *adev)
|
|
+{
|
|
+ struct acpi_serdev_lookup lookup;
|
|
+ int ret;
|
|
+
|
|
+ if (acpi_bus_get_status(adev) || !adev->status.present)
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* Look for UARTSerialBusV2 resource */
|
|
+ lookup.index = -1; // we only care for the last device
|
|
+
|
|
+ ret = acpi_serdev_do_lookup(adev, &lookup);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Make sure controller and ResourceSource handle match */
|
|
+ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
|
|
- struct acpi_device *adev)
|
|
+ struct acpi_device *adev)
|
|
{
|
|
- struct serdev_device *serdev = NULL;
|
|
+ struct serdev_device *serdev;
|
|
int err;
|
|
|
|
- if (acpi_bus_get_status(adev) || !adev->status.present ||
|
|
- acpi_device_enumerated(adev))
|
|
- return AE_OK;
|
|
-
|
|
serdev = serdev_device_alloc(ctrl);
|
|
if (!serdev) {
|
|
dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
|
|
@@ -533,7 +614,7 @@ static const struct acpi_device_id serdev_acpi_devices_blacklist[] = {
|
|
};
|
|
|
|
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
|
|
- void *data, void **return_value)
|
|
+ void *data, void **return_value)
|
|
{
|
|
struct serdev_controller *ctrl = data;
|
|
struct acpi_device *adev;
|
|
@@ -541,26 +622,32 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
|
|
if (acpi_bus_get_device(handle, &adev))
|
|
return AE_OK;
|
|
|
|
+ if (acpi_device_enumerated(adev))
|
|
+ return AE_OK;
|
|
+
|
|
/* Skip if black listed */
|
|
if (!acpi_match_device_ids(adev, serdev_acpi_devices_blacklist))
|
|
return AE_OK;
|
|
|
|
+ if (acpi_serdev_check_resources(ctrl, adev))
|
|
+ return AE_OK;
|
|
+
|
|
return acpi_serdev_register_device(ctrl, adev);
|
|
}
|
|
|
|
+
|
|
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
|
|
{
|
|
acpi_status status;
|
|
- acpi_handle handle;
|
|
|
|
- handle = ACPI_HANDLE(ctrl->dev.parent);
|
|
- if (!handle)
|
|
+ if (!has_acpi_companion(ctrl->dev.parent))
|
|
return -ENODEV;
|
|
|
|
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
|
|
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
|
|
+ SERDEV_ACPI_MAX_SCAN_DEPTH,
|
|
acpi_serdev_add_device, NULL, ctrl, NULL);
|
|
if (ACPI_FAILURE(status))
|
|
- dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
|
|
+ dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
|
|
|
|
if (!ctrl->serdev)
|
|
return -ENODEV;
|
|
--
|
|
2.33.0
|
|
|
|
From 92a7407ceae7b34677e6fdef8c3d44bd95911c63 Mon Sep 17 00:00:00 2001
|
|
From: Maximilian Luz <luzmaximilian@gmail.com>
|
|
Date: Mon, 17 Aug 2020 01:23:20 +0200
|
|
Subject: [PATCH] Add file2alias support for Surface Aggregator devices
|
|
|
|
Implement file2alias support for Surface System Aggregator Module (SSAM)
|
|
devices. This allows modules to be auto-loaded for specific devices via
|
|
their respective module alias.
|
|
|
|
Patchset: surface-sam
|
|
---
|
|
include/linux/mod_devicetable.h | 17 +++++++++++++++++
|
|
scripts/mod/devicetable-offsets.c | 7 +++++++
|
|
scripts/mod/file2alias.c | 21 +++++++++++++++++++++
|
|
3 files changed, 45 insertions(+)
|
|
|
|
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
|
|
index 610cdf8082f2..69f4527315e7 100644
|
|
--- a/include/linux/mod_devicetable.h
|
|
+++ b/include/linux/mod_devicetable.h
|
|
@@ -768,4 +768,21 @@ struct typec_device_id {
|
|
kernel_ulong_t driver_data;
|
|
};
|
|
|
|
+/* Surface System Aggregator Module */
|
|
+
|
|
+#define SSAM_MATCH_CHANNEL 0x1
|
|
+#define SSAM_MATCH_INSTANCE 0x2
|
|
+#define SSAM_MATCH_FUNCTION 0x4
|
|
+
|
|
+struct ssam_device_id {
|
|
+ __u8 match_flags;
|
|
+
|
|
+ __u8 category;
|
|
+ __u8 channel;
|
|
+ __u8 instance;
|
|
+ __u8 function;
|
|
+
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
#endif /* LINUX_MOD_DEVICETABLE_H */
|
|
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
|
|
index 293004499b4d..13acbf55c6fd 100644
|
|
--- a/scripts/mod/devicetable-offsets.c
|
|
+++ b/scripts/mod/devicetable-offsets.c
|
|
@@ -225,5 +225,12 @@ int main(void)
|
|
DEVID_FIELD(typec_device_id, svid);
|
|
DEVID_FIELD(typec_device_id, mode);
|
|
|
|
+ DEVID(ssam_device_id);
|
|
+ DEVID_FIELD(ssam_device_id, match_flags);
|
|
+ DEVID_FIELD(ssam_device_id, category);
|
|
+ DEVID_FIELD(ssam_device_id, channel);
|
|
+ DEVID_FIELD(ssam_device_id, instance);
|
|
+ DEVID_FIELD(ssam_device_id, function);
|
|
+
|
|
return 0;
|
|
}
|
|
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
|
|
index 7f40b6aab689..76e3b1d7db45 100644
|
|
--- a/scripts/mod/file2alias.c
|
|
+++ b/scripts/mod/file2alias.c
|
|
@@ -1276,6 +1276,26 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
|
|
return 1;
|
|
}
|
|
|
|
+/* Looks like: ssam:cNtNiNfN
|
|
+ *
|
|
+ * N is exactly 2 digits, where each is an upper-case hex digit.
|
|
+ */
|
|
+static int do_ssam_entry(const char *filename, void *symval, char *alias)
|
|
+{
|
|
+ DEF_FIELD(symval, ssam_device_id, match_flags);
|
|
+ DEF_FIELD(symval, ssam_device_id, category);
|
|
+ DEF_FIELD(symval, ssam_device_id, channel);
|
|
+ DEF_FIELD(symval, ssam_device_id, instance);
|
|
+ DEF_FIELD(symval, ssam_device_id, function);
|
|
+
|
|
+ sprintf(alias, "ssam:c%02X", category);
|
|
+ ADD(alias, "t", match_flags & SSAM_MATCH_CHANNEL, channel);
|
|
+ ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
|
|
+ ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
|
|
+
|
|
+ return 1;
|
|
+}
|
|
+
|
|
/* Does namelen bytes of name exactly match the symbol? */
|
|
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
|
|
{
|
|
@@ -1346,6 +1366,7 @@ static const struct devtable devtable[] = {
|
|
{"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
|
|
{"tbsvc", SIZE_tb_service_id, do_tbsvc_entry},
|
|
{"typec", SIZE_typec_device_id, do_typec_entry},
|
|
+ {"ssam", SIZE_ssam_device_id, do_ssam_entry},
|
|
};
|
|
|
|
/* Create MODULE_ALIAS() statements.
|
|
--
|
|
2.33.0
|
|
|
|
From e2854086b083f87547d7737556ddbcfa3a76bd6a Mon Sep 17 00:00:00 2001
|
|
From: Maximilian Luz <luzmaximilian@gmail.com>
|
|
Date: Mon, 17 Aug 2020 01:44:30 +0200
|
|
Subject: [PATCH] platform/x86: Add support for Surface System Aggregator
|
|
Module
|
|
|
|
Add support for the Surface System Aggregator Module (SSAM), an embedded
|
|
controller that can be found on 5th and later generation Microsoft
|
|
Surface devices. The responsibilities of this EC vary from device to
|
|
device. It provides battery information on all 5th and later generation
|
|
devices, temperature sensor and cooling capability access, functionality
|
|
for clipboard detaching on the Surface Books (2 and 3), as well as
|
|
HID-over-SSAM input devices, including keyboard on the Surface Laptop 1
|
|
and 2, and keyboard as well as touchpad input on the Surface Laptop 3
|
|
and Surface Book 3.
|
|
|
|
Patchset: surface-sam
|
|
---
|
|
Documentation/driver-api/index.rst | 1 +
|
|
.../surface_aggregator/client-api.rst | 38 +
|
|
.../driver-api/surface_aggregator/client.rst | 393 +++
|
|
.../surface_aggregator/clients/cdev.rst | 204 ++
|
|
.../surface_aggregator/clients/dtx.rst | 712 +++++
|
|
.../surface_aggregator/clients/index.rst | 22 +
|
|
.../surface_aggregator/clients/san.rst | 44 +
|
|
.../driver-api/surface_aggregator/index.rst | 21 +
|
|
.../surface_aggregator/internal-api.rst | 67 +
|
|
.../surface_aggregator/internal.rst | 577 ++++
|
|
.../surface_aggregator/overview.rst | 77 +
|
|
.../driver-api/surface_aggregator/ssh.rst | 344 ++
|
|
drivers/hid/Kconfig | 2 +
|
|
drivers/hid/Makefile | 2 +
|
|
drivers/hid/surface-hid/Kconfig | 42 +
|
|
drivers/hid/surface-hid/Makefile | 7 +
|
|
drivers/hid/surface-hid/surface_hid.c | 251 ++
|
|
drivers/hid/surface-hid/surface_hid_core.c | 272 ++
|
|
drivers/hid/surface-hid/surface_hid_core.h | 73 +
|
|
drivers/hid/surface-hid/surface_kbd.c | 300 ++
|
|
drivers/platform/x86/Kconfig | 102 +
|
|
drivers/platform/x86/Makefile | 6 +
|
|
drivers/platform/x86/surface_acpi_notify.c | 886 ++++++
|
|
.../platform/x86/surface_aggregator/Kconfig | 69 +
|
|
.../platform/x86/surface_aggregator/Makefile | 17 +
|
|
drivers/platform/x86/surface_aggregator/bus.c | 415 +++
|
|
drivers/platform/x86/surface_aggregator/bus.h | 27 +
|
|
.../x86/surface_aggregator/controller.c | 2780 +++++++++++++++++
|
|
.../x86/surface_aggregator/controller.h | 285 ++
|
|
.../platform/x86/surface_aggregator/core.c | 850 +++++
|
|
.../x86/surface_aggregator/ssh_msgb.h | 205 ++
|
|
.../x86/surface_aggregator/ssh_packet_layer.c | 2074 ++++++++++++
|
|
.../x86/surface_aggregator/ssh_packet_layer.h | 190 ++
|
|
.../x86/surface_aggregator/ssh_parser.c | 228 ++
|
|
.../x86/surface_aggregator/ssh_parser.h | 154 +
|
|
.../surface_aggregator/ssh_request_layer.c | 1263 ++++++++
|
|
.../surface_aggregator/ssh_request_layer.h | 143 +
|
|
.../platform/x86/surface_aggregator/trace.h | 632 ++++
|
|
.../platform/x86/surface_aggregator_cdev.c | 810 +++++
|
|
.../x86/surface_aggregator_registry.c | 618 ++++
|
|
drivers/platform/x86/surface_dtx.c | 1281 ++++++++
|
|
drivers/platform/x86/surface_perfmode.c | 122 +
|
|
drivers/power/supply/Kconfig | 32 +
|
|
drivers/power/supply/Makefile | 2 +
|
|
drivers/power/supply/surface_battery.c | 816 +++++
|
|
drivers/power/supply/surface_charger.c | 282 ++
|
|
include/linux/mod_devicetable.h | 5 +-
|
|
include/linux/surface_acpi_notify.h | 39 +
|
|
include/linux/surface_aggregator/controller.h | 849 +++++
|
|
include/linux/surface_aggregator/device.h | 423 +++
|
|
include/linux/surface_aggregator/serial_hub.h | 668 ++++
|
|
include/uapi/linux/surface_aggregator/cdev.h | 147 +
|
|
include/uapi/linux/surface_aggregator/dtx.h | 146 +
|
|
scripts/mod/devicetable-offsets.c | 3 +-
|
|
scripts/mod/file2alias.c | 10 +-
|
|
55 files changed, 20021 insertions(+), 7 deletions(-)
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/client-api.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/client.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/clients/cdev.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/clients/dtx.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/clients/index.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/clients/san.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/index.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/internal-api.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/internal.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/overview.rst
|
|
create mode 100644 Documentation/driver-api/surface_aggregator/ssh.rst
|
|
create mode 100644 drivers/hid/surface-hid/Kconfig
|
|
create mode 100644 drivers/hid/surface-hid/Makefile
|
|
create mode 100644 drivers/hid/surface-hid/surface_hid.c
|
|
create mode 100644 drivers/hid/surface-hid/surface_hid_core.c
|
|
create mode 100644 drivers/hid/surface-hid/surface_hid_core.h
|
|
create mode 100644 drivers/hid/surface-hid/surface_kbd.c
|
|
create mode 100644 drivers/platform/x86/surface_acpi_notify.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/Kconfig
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/Makefile
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/bus.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/bus.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/controller.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/controller.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/core.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_msgb.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_packet_layer.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_packet_layer.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_parser.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_parser.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_request_layer.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/ssh_request_layer.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator/trace.h
|
|
create mode 100644 drivers/platform/x86/surface_aggregator_cdev.c
|
|
create mode 100644 drivers/platform/x86/surface_aggregator_registry.c
|
|
create mode 100644 drivers/platform/x86/surface_dtx.c
|
|
create mode 100644 drivers/platform/x86/surface_perfmode.c
|
|
create mode 100644 drivers/power/supply/surface_battery.c
|
|
create mode 100644 drivers/power/supply/surface_charger.c
|
|
create mode 100644 include/linux/surface_acpi_notify.h
|
|
create mode 100644 include/linux/surface_aggregator/controller.h
|
|
create mode 100644 include/linux/surface_aggregator/device.h
|
|
create mode 100644 include/linux/surface_aggregator/serial_hub.h
|
|
create mode 100644 include/uapi/linux/surface_aggregator/cdev.h
|
|
create mode 100644 include/uapi/linux/surface_aggregator/dtx.h
|
|
|
|
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
|
|
index 6d9f2f9fe20e..e36fbb60c676 100644
|
|
--- a/Documentation/driver-api/index.rst
|
|
+++ b/Documentation/driver-api/index.rst
|
|
@@ -53,6 +53,7 @@ available subsections can be seen below.
|
|
slimbus
|
|
soundwire/index
|
|
fpga/index
|
|
+ surface_aggregator/index
|
|
|
|
.. only:: subproject and html
|
|
|
|
diff --git a/Documentation/driver-api/surface_aggregator/client-api.rst b/Documentation/driver-api/surface_aggregator/client-api.rst
|
|
new file mode 100644
|
|
index 000000000000..a1117d57036a
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/client-api.rst
|
|
@@ -0,0 +1,38 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+===============================
|
|
+Client Driver API Documentation
|
|
+===============================
|
|
+
|
|
+.. contents::
|
|
+ :depth: 2
|
|
+
|
|
+
|
|
+Serial Hub Communication
|
|
+========================
|
|
+
|
|
+.. kernel-doc:: include/linux/surface_aggregator/serial_hub.h
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.c
|
|
+ :export:
|
|
+
|
|
+
|
|
+Controller and Core Interface
|
|
+=============================
|
|
+
|
|
+.. kernel-doc:: include/linux/surface_aggregator/controller.h
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/controller.c
|
|
+ :export:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/core.c
|
|
+ :export:
|
|
+
|
|
+
|
|
+Client Bus and Client Device API
|
|
+================================
|
|
+
|
|
+.. kernel-doc:: include/linux/surface_aggregator/device.h
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/bus.c
|
|
+ :export:
|
|
diff --git a/Documentation/driver-api/surface_aggregator/client.rst b/Documentation/driver-api/surface_aggregator/client.rst
|
|
new file mode 100644
|
|
index 000000000000..26d13085a117
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/client.rst
|
|
@@ -0,0 +1,393 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
|
|
+.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
|
|
+.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
|
|
+.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
|
|
+.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
|
|
+.. |ssam_get_controller| replace:: :c:func:`ssam_get_controller`
|
|
+.. |ssam_controller_get| replace:: :c:func:`ssam_controller_get`
|
|
+.. |ssam_controller_put| replace:: :c:func:`ssam_controller_put`
|
|
+.. |ssam_device_alloc| replace:: :c:func:`ssam_device_alloc`
|
|
+.. |ssam_device_add| replace:: :c:func:`ssam_device_add`
|
|
+.. |ssam_device_remove| replace:: :c:func:`ssam_device_remove`
|
|
+.. |ssam_device_driver_register| replace:: :c:func:`ssam_device_driver_register`
|
|
+.. |ssam_device_driver_unregister| replace:: :c:func:`ssam_device_driver_unregister`
|
|
+.. |module_ssam_device_driver| replace:: :c:func:`module_ssam_device_driver`
|
|
+.. |SSAM_DEVICE| replace:: :c:func:`SSAM_DEVICE`
|
|
+.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
|
|
+.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
|
|
+.. |ssam_request_sync| replace:: :c:func:`ssam_request_sync`
|
|
+.. |ssam_event_mask| replace:: :c:type:`enum ssam_event_mask <ssam_event_mask>`
|
|
+
|
|
+
|
|
+======================
|
|
+Writing Client Drivers
|
|
+======================
|
|
+
|
|
+For the API documentation, refer to:
|
|
+
|
|
+.. toctree::
|
|
+ :maxdepth: 2
|
|
+
|
|
+ client-api
|
|
+
|
|
+
|
|
+Overview
|
|
+========
|
|
+
|
|
+Client drivers can be set up in two main ways, depending on how the
|
|
+corresponding device is made available to the system. We specifically
|
|
+differentiate between devices that are presented to the system via one of
|
|
+the conventional ways, e.g. as platform devices via ACPI, and devices that
|
|
+are non-discoverable and instead need to be explicitly provided by some
|
|
+other mechanism, as discussed further below.
|
|
+
|
|
+
|
|
+Non-SSAM Client Drivers
|
|
+=======================
|
|
+
|
|
+All communication with the SAM EC is handled via the |ssam_controller|
|
|
+representing that EC to the kernel. Drivers targeting a non-SSAM device (and
|
|
+thus not being a |ssam_device_driver|) need to explicitly establish a
|
|
+connection/relation to that controller. This can be done via the
|
|
+|ssam_client_bind| function. Said function returns a reference to the SSAM
|
|
+controller, but, more importantly, also establishes a device link between
|
|
+client device and controller (this can also be done separately via
|
|
+|ssam_client_link|). It is important to do this, as it, first, guarantees
|
|
+that the returned controller is valid for use in the client driver for as
|
|
+long as this driver is bound to its device, i.e. that the driver gets
|
|
+unbound before the controller ever becomes invalid, and, second, as it
|
|
+ensures correct suspend/resume ordering. This setup should be done in the
|
|
+driver's probe function, and may be used to defer probing in case the SSAM
|
|
+subsystem is not ready yet, for example:
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ static int client_driver_probe(struct platform_device *pdev)
|
|
+ {
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ // ...
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+The controller may be separately obtained via |ssam_get_controller| and its
|
|
+lifetime be guaranteed via |ssam_controller_get| and |ssam_controller_put|.
|
|
+Note that none of these functions, however, guarantee that the controller
|
|
+will not be shut down or suspended. These functions essentially only operate
|
|
+on the reference, i.e. only guarantee a bare minimum of accessibility
|
|
+without any guarantees at all on practical operability.
|
|
+
|
|
+
|
|
+Adding SSAM Devices
|
|
+===================
|
|
+
|
|
+If a device does not already exist/is not already provided via conventional
|
|
+means, it should be provided as |ssam_device| via the SSAM client device
|
|
+hub. New devices can be added to this hub by entering their UID into the
|
|
+corresponding registry. SSAM devices can also be manually allocated via
|
|
+|ssam_device_alloc|, subsequently to which they have to be added via
|
|
+|ssam_device_add| and eventually removed via |ssam_device_remove|. By
|
|
+default, the parent of the device is set to the controller device provided
|
|
+for allocation, however this may be changed before the device is added. Note
|
|
+that, when changing the parent device, care must be taken to ensure that the
|
|
+controller lifetime and suspend/resume ordering guarantees, in the default
|
|
+setup provided through the parent-child relation, are preserved. If
|
|
+necessary, by use of |ssam_client_link| as is done for non-SSAM client
|
|
+drivers and described in more detail above.
|
|
+
|
|
+A client device must always be removed by the party which added the
|
|
+respective device before the controller shuts down. Such removal can be
|
|
+guaranteed by linking the driver providing the SSAM device to the controller
|
|
+via |ssam_client_link|, causing it to unbind before the controller driver
|
|
+unbinds. Client devices registered with the controller as parent are
|
|
+automatically removed when the controller shuts down, but this should not be
|
|
+relied upon, especially as this does not extend to client devices with a
|
|
+different parent.
|
|
+
|
|
+
|
|
+SSAM Client Drivers
|
|
+===================
|
|
+
|
|
+SSAM client device drivers are, in essence, no different than other device
|
|
+driver types. They are represented via |ssam_device_driver| and bind to a
|
|
+|ssam_device| via its UID (:c:type:`struct ssam_device.uid <ssam_device>`)
|
|
+member and the match table
|
|
+(:c:type:`struct ssam_device_driver.match_table <ssam_device_driver>`),
|
|
+which should be set when declaring the driver struct instance. Refer to the
|
|
+|SSAM_DEVICE| macro documentation for more details on how to define members
|
|
+of the driver's match table.
|
|
+
|
|
+The UID for SSAM client devices consists of a ``domain``, a ``category``,
|
|
+a ``target``, an ``instance``, and a ``function``. The ``domain`` is used to
|
|
+differentiate between physical SAM devices
|
|
+(:c:type:`SSAM_DOMAIN_SERIALHUB <ssam_device_domain>`), i.e. devices that can
|
|
+be accessed via the Surface Serial Hub, and virtual ones
|
|
+(:c:type:`SSAM_DOMAIN_VIRTUAL <ssam_device_domain>`), such as client-device
|
|
+hubs, that have no real representation on the SAM EC and are solely used on
|
|
+the kernel/driver-side. For physical devices, ``category`` represents the
|
|
+target category, ``target`` the target ID, and ``instance`` the instance ID
|
|
+used to access the physical SAM device. In addition, ``function`` references
|
|
+a specific device functionality, but has no meaning to the SAM EC. The
|
|
+(default) name of a client device is generated based on its UID.
|
|
+
|
|
+A driver instance can be registered via |ssam_device_driver_register| and
|
|
+unregistered via |ssam_device_driver_unregister|. For convenience, the
|
|
+|module_ssam_device_driver| macro may be used to define module init- and
|
|
+exit-functions registering the driver.
|
|
+
|
|
+The controller associated with a SSAM client device can be found in its
|
|
+:c:type:`struct ssam_device.ctrl <ssam_device>` member. This reference is
|
|
+guaranteed to be valid for at least as long as the client driver is bound,
|
|
+but should also be valid for as long as the client device exists. Note,
|
|
+however, that access outside of the bound client driver must ensure that the
|
|
+controller device is not suspended while making any requests or
|
|
+(un-)registering event notifiers (and thus should generally be avoided). This
|
|
+is guaranteed when the controller is accessed from inside the bound client
|
|
+driver.
|
|
+
|
|
+
|
|
+Making Synchronous Requests
|
|
+===========================
|
|
+
|
|
+Synchronous requests are (currently) the main form of host-initiated
|
|
+communication with the EC. There are a couple of ways to define and execute
|
|
+such requests, however, most of them boil down to something similar as shown
|
|
+in the example below. This example defines a write-read request, meaning
|
|
+that the caller provides an argument to the SAM EC and receives a response.
|
|
+The caller needs to know the (maximum) length of the response payload and
|
|
+provide a buffer for it.
|
|
+
|
|
+Care must be taken to ensure that any command payload data passed to the SAM
|
|
+EC is provided in little-endian format and, similarly, any response payload
|
|
+data received from it is converted from little-endian to host endianness.
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ int perform_request(struct ssam_controller *ctrl, u32 arg, u32 *ret)
|
|
+ {
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response resp;
|
|
+ int status;
|
|
+
|
|
+ /* Convert request argument to little-endian. */
|
|
+ __le32 arg_le = cpu_to_le32(arg);
|
|
+ __le32 ret_le = cpu_to_le32(0);
|
|
+
|
|
+ /*
|
|
+ * Initialize request specification. Replace this with your values.
|
|
+ * The rqst.payload field may be NULL if rqst.length is zero,
|
|
+ * indicating that the request does not have any argument.
|
|
+ *
|
|
+ * Note: The request parameters used here are not valid, i.e.
|
|
+ * they do not correspond to an actual SAM/EC request.
|
|
+ */
|
|
+ rqst.target_category = SSAM_SSH_TC_SAM;
|
|
+ rqst.target_id = 0x01;
|
|
+ rqst.command_id = 0x02;
|
|
+ rqst.instance_id = 0x03;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(arg_le);
|
|
+ rqst.payload = (u8 *)&arg_le;
|
|
+
|
|
+ /* Initialize request response. */
|
|
+ resp.capacity = sizeof(ret_le);
|
|
+ resp.length = 0;
|
|
+ resp.pointer = (u8 *)&ret_le;
|
|
+
|
|
+ /*
|
|
+ * Perform actual request. The response pointer may be null in case
|
|
+ * the request does not have any response. This must be consistent
|
|
+ * with the SSAM_REQUEST_HAS_RESPONSE flag set in the specification
|
|
+ * above.
|
|
+ */
|
|
+ status = ssam_request_sync(ctrl, &rqst, &resp);
|
|
+
|
|
+ /*
|
|
+ * Alternatively use
|
|
+ *
|
|
+ * ssam_request_sync_onstack(ctrl, &rqst, &resp, sizeof(arg_le));
|
|
+ *
|
|
+ * to perform the request, allocating the message buffer directly
|
|
+ * on the stack as opposed to allocation via kzalloc().
|
|
+ */
|
|
+
|
|
+ /*
|
|
+ * Convert request response back to native format. Note that in the
|
|
+ * error case, this value is not touched by the SSAM core, i.e.
|
|
+ * 'ret_le' will be zero as specified in its initialization.
|
|
+ */
|
|
+ *ret = le32_to_cpu(ret_le);
|
|
+
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+Note that |ssam_request_sync| in its essence is a wrapper over lower-level
|
|
+request primitives, which may also be used to perform requests. Refer to its
|
|
+implementation and documentation for more details.
|
|
+
|
|
+An arguably more user-friendly way of defining such functions is by using
|
|
+one of the generator macros, for example via:
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ SSAM_DEFINE_SYNC_REQUEST_W(__ssam_tmp_perf_mode_set, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_TMP,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x03,
|
|
+ .instance_id = 0x00,
|
|
+ });
|
|
+
|
|
+This example defines a function
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ int __ssam_tmp_perf_mode_set(struct ssam_controller *ctrl, const __le32 *arg);
|
|
+
|
|
+executing the specified request, with the controller passed in when calling
|
|
+said function. In this example, the argument is provided via the ``arg``
|
|
+pointer. Note that the generated function allocates the message buffer on
|
|
+the stack. Thus, if the argument provided via the request is large, these
|
|
+kinds of macros should be avoided. Also note that, in contrast to the
|
|
+previous non-macro example, this function does not do any endianness
|
|
+conversion, which has to be handled by the caller. Apart from those
|
|
+differences the function generated by the macro is similar to the one
|
|
+provided in the non-macro example above.
|
|
+
|
|
+The full list of such function-generating macros is
|
|
+
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_N` for requests without return value and
|
|
+ without argument.
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_R` for requests with return value but no
|
|
+ argument.
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_W` for requests without return value but
|
|
+ with argument.
|
|
+
|
|
+Refer to their respective documentation for more details. For each one of
|
|
+these macros, a special variant is provided, which targets request types
|
|
+applicable to multiple instances of the same device type:
|
|
+
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_N`
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_R`
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_MD_W`
|
|
+
|
|
+The difference of those macros to the previously mentioned versions is, that
|
|
+the device target and instance IDs are not fixed for the generated function,
|
|
+but instead have to be provided by the caller of said function.
|
|
+
|
|
+Additionally, variants for direct use with client devices, i.e.
|
|
+|ssam_device|, are also provided. These can, for example, be used as
|
|
+follows:
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x01,
|
|
+ });
|
|
+
|
|
+This invocation of the macro defines a function
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ int ssam_bat_get_sta(struct ssam_device *sdev, __le32 *ret);
|
|
+
|
|
+executing the specified request, using the device IDs and controller given
|
|
+in the client device. The full list of such macros for client devices is:
|
|
+
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_N`
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_R`
|
|
+- :c:func:`SSAM_DEFINE_SYNC_REQUEST_CL_W`
|
|
+
|
|
+
|
|
+Handling Events
|
|
+===============
|
|
+
|
|
+To receive events from the SAM EC, an event notifier must be registered for
|
|
+the desired event via |ssam_notifier_register|. The notifier must be
|
|
+unregistered via |ssam_notifier_unregister| once it is not required any
|
|
+more.
|
|
+
|
|
+Event notifiers are registered by providing (at minimum) a callback to call
|
|
+in case an event has been received, the registry specifying how the event
|
|
+should be enabled, an event ID specifying for which target category and,
|
|
+optionally and depending on the registry used, for which instance ID events
|
|
+should be enabled, and finally, flags describing how the EC will send these
|
|
+events. If the specific registry does not enable events by instance ID, the
|
|
+instance ID must be set to zero. Additionally, a priority for the respective
|
|
+notifier may be specified, which determines its order in relation to any
|
|
+other notifier registered for the same target category.
|
|
+
|
|
+By default, event notifiers will receive all events for the specific target
|
|
+category, regardless of the instance ID specified when registering the
|
|
+notifier. The core may be instructed to only call a notifier if the target
|
|
+ID or instance ID (or both) of the event match the ones implied by the
|
|
+notifier IDs (in case of target ID, the target ID of the registry), by
|
|
+providing an event mask (see |ssam_event_mask|).
|
|
+
|
|
+In general, the target ID of the registry is also the target ID of the
|
|
+enabled event (with the notable exception being keyboard input events on the
|
|
+Surface Laptop 1 and 2, which are enabled via a registry with target ID 1,
|
|
+but provide events with target ID 2).
|
|
+
|
|
+A full example for registering an event notifier and handling received
|
|
+events is provided below:
|
|
+
|
|
+.. code-block:: c
|
|
+
|
|
+ u32 notifier_callback(struct ssam_event_notifier *nf,
|
|
+ const struct ssam_event *event)
|
|
+ {
|
|
+ int status = ...
|
|
+
|
|
+ /* Handle the event here ... */
|
|
+
|
|
+ /* Convert return value and indicate that we handled the event. */
|
|
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
|
|
+ }
|
|
+
|
|
+ int setup_notifier(struct ssam_device *sdev,
|
|
+ struct ssam_event_notifier *nf)
|
|
+ {
|
|
+ /* Set priority wrt. other handlers of same target category. */
|
|
+ nf->base.priority = 1;
|
|
+
|
|
+ /* Set event/notifier callback. */
|
|
+ nf->base.fn = notifier_callback;
|
|
+
|
|
+ /* Specify event registry, i.e. how events get enabled/disabled. */
|
|
+ nf->event.reg = SSAM_EVENT_REGISTRY_KIP;
|
|
+
|
|
+ /* Specify which event to enable/disable */
|
|
+ nf->event.id.target_category = sdev->uid.category;
|
|
+ nf->event.id.instance = sdev->uid.instance;
|
|
+
|
|
+ /*
|
|
+ * Specify for which events the notifier callback gets executed.
|
|
+ * This essentially tells the core if it can skip notifiers that
|
|
+ * don't have target or instance IDs matching those of the event.
|
|
+ */
|
|
+ nf->event.mask = SSAM_EVENT_MASK_STRICT;
|
|
+
|
|
+ /* Specify event flags. */
|
|
+ nf->event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ return ssam_notifier_register(sdev->ctrl, nf);
|
|
+ }
|
|
+
|
|
+Multiple event notifiers can be registered for the same event. The event
|
|
+handler core takes care of enabling and disabling events when notifiers are
|
|
+registered and unregistered, by keeping track of how many notifiers for a
|
|
+specific event (combination of registry, event target category, and event
|
|
+instance ID) are currently registered. This means that a specific event will
|
|
+be enabled when the first notifier for it is being registered and disabled
|
|
+when the last notifier for it is being unregistered. Note that the event
|
|
+flags are therefore only used on the first registered notifier, however, one
|
|
+should take care that notifiers for a specific event are always registered
|
|
+with the same flag and it is considered a bug to do otherwise.
|
|
diff --git a/Documentation/driver-api/surface_aggregator/clients/cdev.rst b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
|
|
new file mode 100644
|
|
index 000000000000..0134a841a079
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/clients/cdev.rst
|
|
@@ -0,0 +1,204 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |ssam_cdev_request| replace:: :c:type:`struct ssam_cdev_request <ssam_cdev_request>`
|
|
+.. |ssam_cdev_request_flags| replace:: :c:type:`enum ssam_cdev_request_flags <ssam_cdev_request_flags>`
|
|
+.. |ssam_cdev_event| replace:: :c:type:`struct ssam_cdev_event <ssam_cdev_event>`
|
|
+
|
|
+==============================
|
|
+User-Space EC Interface (cdev)
|
|
+==============================
|
|
+
|
|
+The ``surface_aggregator_cdev`` module provides a misc-device for the SSAM
|
|
+controller to allow for a (more or less) direct connection from user-space to
|
|
+the SAM EC. It is intended to be used for development and debugging, and
|
|
+therefore should not be used or relied upon in any other way. Note that this
|
|
+module is not loaded automatically, but instead must be loaded manually.
|
|
+
|
|
+The provided interface is accessible through the ``/dev/surface/aggregator``
|
|
+device-file. All functionality of this interface is provided via IOCTLs.
|
|
+These IOCTLs and their respective input/output parameter structs are defined in
|
|
+``include/uapi/linux/surface_aggregator/cdev.h``.
|
|
+
|
|
+A small python library and scripts for accessing this interface can be found
|
|
+at https://github.com/linux-surface/surface-aggregator-module/tree/master/scripts/ssam.
|
|
+
|
|
+.. contents::
|
|
+
|
|
+
|
|
+Receiving Events
|
|
+================
|
|
+
|
|
+Events can be received by reading from the device-file. They are represented by
|
|
+the |ssam_cdev_event| datatype.
|
|
+
|
|
+Before events are available to be read, however, the desired notifiers must be
|
|
+registered via the ``SSAM_CDEV_NOTIF_REGISTER`` IOCTL. Notifiers are, in
|
|
+essence, callbacks, called when the EC sends an event. They are, in this
|
|
+interface, associated with a specific target category and device-file-instance.
|
|
+They forward any event of this category to the buffer of the corresponding
|
|
+instance, from which it can then be read.
|
|
+
|
|
+Notifiers themselves do not enable events on the EC. Thus, it may additionally
|
|
+be necessary to enable events via the ``SSAM_CDEV_EVENT_ENABLE`` IOCTL. While
|
|
+notifiers work per-client (i.e. per-device-file-instance), events are enabled
|
|
+globally, for the EC and all of its clients (regardless of userspace or
|
|
+non-userspace). The ``SSAM_CDEV_EVENT_ENABLE`` and ``SSAM_CDEV_EVENT_DISABLE``
|
|
+IOCTLs take care of reference counting the events, such that an event is
|
|
+enabled as long as there is a client that has requested it.
|
|
+
|
|
+Note that enabled events are not automatically disabled once the client
|
|
+instance is closed. Therefore any client process (or group of processes) should
|
|
+balance their event enable calls with the corresponding event disable calls. It
|
|
+is, however, perfectly valid to enable and disable events on different client
|
|
+instances. For example, it is valid to set up notifiers and read events on
|
|
+client instance ``A``, enable those events on instance ``B`` (note that these
|
|
+will also be received by A since events are enabled/disabled globally), and
|
|
+after no more events are desired, disable the previously enabled events via
|
|
+instance ``C``.
|
|
+
|
|
+
|
|
+Controller IOCTLs
|
|
+=================
|
|
+
|
|
+The following IOCTLs are provided:
|
|
+
|
|
+.. flat-table:: Controller IOCTLs
|
|
+ :widths: 1 1 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Type
|
|
+ - Number
|
|
+ - Direction
|
|
+ - Name
|
|
+ - Description
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``1``
|
|
+ - ``WR``
|
|
+ - ``REQUEST``
|
|
+ - Perform synchronous SAM request.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``2``
|
|
+ - ``W``
|
|
+ - ``NOTIF_REGISTER``
|
|
+ - Register event notifier.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``3``
|
|
+ - ``W``
|
|
+ - ``NOTIF_UNREGISTER``
|
|
+ - Unregister event notifier.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``4``
|
|
+ - ``W``
|
|
+ - ``EVENT_ENABLE``
|
|
+ - Enable event source.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``5``
|
|
+ - ``W``
|
|
+ - ``EVENT_DISABLE``
|
|
+ - Disable event source.
|
|
+
|
|
+
|
|
+``SSAM_CDEV_REQUEST``
|
|
+---------------------
|
|
+
|
|
+Defined as ``_IOWR(0xA5, 1, struct ssam_cdev_request)``.
|
|
+
|
|
+Executes a synchronous SAM request. The request specification is passed in
|
|
+as argument of type |ssam_cdev_request|, which is then written to/modified
|
|
+by the IOCTL to return status and result of the request.
|
|
+
|
|
+Request payload data must be allocated separately and is passed in via the
|
|
+``payload.data`` and ``payload.length`` members. If a response is required,
|
|
+the response buffer must be allocated by the caller and passed in via the
|
|
+``response.data`` member. The ``response.length`` member must be set to the
|
|
+capacity of this buffer, or if no response is required, zero. Upon
|
|
+completion of the request, the call will write the response to the response
|
|
+buffer (if its capacity allows it) and overwrite the length field with the
|
|
+actual size of the response, in bytes.
|
|
+
|
|
+Additionally, if the request has a response, this must be indicated via the
|
|
+request flags, as is done with in-kernel requests. Request flags can be set
|
|
+via the ``flags`` member and the values correspond to the values found in
|
|
+|ssam_cdev_request_flags|.
|
|
+
|
|
+Finally, the status of the request itself is returned in the ``status``
|
|
+member (a negative errno value indicating failure). Note that failure
|
|
+indication of the IOCTL is separated from failure indication of the request:
|
|
+The IOCTL returns a negative status code if anything failed during setup of
|
|
+the request (``-EFAULT``) or if the provided argument or any of its fields
|
|
+are invalid (``-EINVAL``). In this case, the status value of the request
|
|
+argument may be set, providing more detail on what went wrong (e.g.
|
|
+``-ENOMEM`` for out-of-memory), but this value may also be zero. The IOCTL
|
|
+will return with a zero status code in case the request has been set up,
|
|
+submitted, and completed (i.e. handed back to user-space) successfully from
|
|
+inside the IOCTL, but the request ``status`` member may still be negative in
|
|
+case the actual execution of the request failed after it has been submitted.
|
|
+
|
|
+A full definition of the argument struct is provided below.
|
|
+
|
|
+``SSAM_CDEV_NOTIF_REGISTER``
|
|
+----------------------------
|
|
+
|
|
+Defined as ``_IOW(0xA5, 2, struct ssam_cdev_notifier_desc)``.
|
|
+
|
|
+Register a notifier for the event target category specified in the given
|
|
+notifier description with the specified priority. Notifier registration is
|
|
+required to receive events, but does not enable events themselves. After a
|
|
+notifier for a specific target category has been registered, all events of that
|
|
+category will be forwarded to the userspace client and can then be read from
|
|
+the device file instance. Note that events may have to be enabled, e.g. via the
|
|
+``SSAM_CDEV_EVENT_ENABLE`` IOCTL, before the EC will send them.
|
|
+
|
|
+Only one notifier can be registered per target category and client instance. If
|
|
+a notifier has already been registered, this IOCTL will fail with ``-EEXIST``.
|
|
+
|
|
+Notifiers will automatically be removed when the device file instance is
|
|
+closed.
|
|
+
|
|
+``SSAM_CDEV_NOTIF_UNREGISTER``
|
|
+------------------------------
|
|
+
|
|
+Defined as ``_IOW(0xA5, 3, struct ssam_cdev_notifier_desc)``.
|
|
+
|
|
+Unregisters the notifier associated with the specified target category. The
|
|
+priority field will be ignored by this IOCTL. If no notifier has been
|
|
+registered for this client instance and the given category, this IOCTL will
|
|
+fail with ``-ENOENT``.
|
|
+
|
|
+``SSAM_CDEV_EVENT_ENABLE``
|
|
+--------------------------
|
|
+
|
|
+Defined as ``_IOW(0xA5, 4, struct ssam_cdev_event_desc)``.
|
|
+
|
|
+Enable the event associated with the given event descriptor.
|
|
+
|
|
+Note that this call will not register a notifier itself, it will only enable
|
|
+events on the controller. If you want to receive events by reading from the
|
|
+device file, you will need to register the corresponding notifier(s) on that
|
|
+instance.
|
|
+
|
|
+Events are not automatically disabled when the device file is closed. This must
|
|
+be done manually, via a call to the ``SSAM_CDEV_EVENT_DISABLE`` IOCTL.
|
|
+
|
|
+``SSAM_CDEV_EVENT_DISABLE``
|
|
+---------------------------
|
|
+
|
|
+Defined as ``_IOW(0xA5, 5, struct ssam_cdev_event_desc)``.
|
|
+
|
|
+Disable the event associated with the given event descriptor.
|
|
+
|
|
+Note that this will not unregister any notifiers. Events may still be received
|
|
+and forwarded to user-space after this call. The only safe way of stopping
|
|
+events from being received is unregistering all previously registered
|
|
+notifiers.
|
|
+
|
|
+
|
|
+Structures and Enums
|
|
+====================
|
|
+
|
|
+.. kernel-doc:: include/uapi/linux/surface_aggregator/cdev.h
|
|
diff --git a/Documentation/driver-api/surface_aggregator/clients/dtx.rst b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
|
|
new file mode 100644
|
|
index 000000000000..e974c2b04e9f
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/clients/dtx.rst
|
|
@@ -0,0 +1,712 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |__u16| replace:: :c:type:`__u16 <__u16>`
|
|
+.. |sdtx_event| replace:: :c:type:`struct sdtx_event <sdtx_event>`
|
|
+.. |sdtx_event_code| replace:: :c:type:`enum sdtx_event_code <sdtx_event_code>`
|
|
+.. |sdtx_base_info| replace:: :c:type:`struct sdtx_base_info <sdtx_base_info>`
|
|
+.. |sdtx_device_mode| replace:: :c:type:`struct sdtx_device_mode <sdtx_device_mode>`
|
|
+
|
|
+======================================================
|
|
+User-Space DTX (Clipboard Detachment System) Interface
|
|
+======================================================
|
|
+
|
|
+The ``surface_dtx`` driver is responsible for proper clipboard detachment
|
|
+and re-attachment handling. To this end, it provides the ``/dev/surface/dtx``
|
|
+device file, through which it can interface with a user-space daemon. This
|
|
+daemon is then ultimately responsible for determining and taking necessary
|
|
+actions, such as unmounting devices attached to the base,
|
|
+unloading/reloading the graphics-driver, user-notifications, etc.
|
|
+
|
|
+There are two basic communication principles used in this driver: Commands
|
|
+(in other parts of the documentation also referred to as requests) and
|
|
+events. Commands are sent to the EC and may have different implications in
|
|
+different contexts. Events are sent by the EC upon some internal state
|
|
+change. Commands are always driver-initiated, whereas events are always
|
|
+initiated by the EC.
|
|
+
|
|
+.. contents::
|
|
+
|
|
+Nomenclature
|
|
+============
|
|
+
|
|
+* **Clipboard:**
|
|
+ The detachable upper part of the Surface Book, housing the screen and CPU.
|
|
+
|
|
+* **Base:**
|
|
+ The lower part of the Surface Book from which the clipboard can be
|
|
+ detached, optionally (model dependent) housing the discrete GPU (dGPU).
|
|
+
|
|
+* **Latch:**
|
|
+ The mechanism keeping the clipboard attached to the base in normal
|
|
+ operation and allowing it to be detached when requested.
|
|
+
|
|
+* **Silently ignored commands:**
|
|
+ The command is accepted by the EC as a valid command and acknowledged
|
|
+ (following the standard communication protocol), but the EC does not act
|
|
+ upon it, i.e. ignores it.
|
|
+
|
|
+
|
|
+Detachment Process
|
|
+==================
|
|
+
|
|
+Warning: This part of the documentation is based on reverse engineering and
|
|
+testing and thus may contain errors or be incomplete.
|
|
+
|
|
+Latch States
|
|
+------------
|
|
+
|
|
+The latch mechanism has two major states: *open* and *closed*. In the
|
|
+*closed* state (default), the clipboard is secured to the base, whereas in
|
|
+the *open* state, the clipboard can be removed by a user.
|
|
+
|
|
+The latch can additionally be locked and, correspondingly, unlocked, which
|
|
+can influence the detachment procedure. Specifically, this locking mechanism
|
|
+is intended to prevent the dGPU, positioned in the base of the device,
|
|
+from being hot-unplugged while in use. More details can be found in the
|
|
+documentation for the detachment procedure below. By default, the latch is
|
|
+unlocked.
|
|
+
|
|
+Detachment Procedure
|
|
+--------------------
|
|
+
|
|
+Note that the detachment process is governed fully by the EC. The
|
|
+``surface_dtx`` driver only relays events from the EC to user-space and
|
|
+commands from user-space to the EC, i.e. it does not influence this process.
|
|
+
|
|
+The detachment process is started with the user pressing the *detach* button
|
|
+on the base of the device or executing the ``SDTX_IOCTL_LATCH_REQUEST`` IOCTL.
|
|
+Following that:
|
|
+
|
|
+1. The EC turns on the indicator led on the detach-button, sends a
|
|
+ *detach-request* event (``SDTX_EVENT_REQUEST``), and awaits further
|
|
+ instructions/commands. In case the latch is unlocked, the led will flash
|
|
+ green. If the latch has been locked, the led will be solid red
|
|
+
|
|
+2. The event is, via the ``surface_dtx`` driver, relayed to user-space, where
|
|
+ an appropriate user-space daemon can handle it and send instructions back
|
|
+ to the EC via IOCTLs provided by this driver.
|
|
+
|
|
+3. The EC waits for instructions from user-space and acts according to them.
|
|
+ If the EC does not receive any instructions in a given period, it will
|
|
+ time out and continue as follows:
|
|
+
|
|
+ - If the latch is unlocked, the EC will open the latch and the clipboard
|
|
+ can be detached from the base. This is the exact behavior as without
|
|
+ this driver or any user-space daemon. See the ``SDTX_IOCTL_LATCH_CONFIRM``
|
|
+ description below for more details on the follow-up behavior of the EC.
|
|
+
|
|
+ - If the latch is locked, the EC will *not* open the latch, meaning the
|
|
+ clipboard cannot be detached from the base. Furthermore, the EC sends
|
|
+ a cancel event (``SDTX_EVENT_CANCEL``) detailing this with the cancel
|
|
+ reason ``SDTX_DETACH_TIMEDOUT`` (see :ref:`events` for details).
|
|
+
|
|
+Valid responses by a user-space daemon to a detachment request event are:
|
|
+
|
|
+- Execute ``SDTX_IOCTL_LATCH_REQUEST``. This will immediately abort the
|
|
+ detachment process. Furthermore, the EC will send a detach-request event,
|
|
+ similar to the user pressing the detach-button to cancel said process (see
|
|
+ below).
|
|
+
|
|
+- Execute ``SDTX_IOCTL_LATCH_CONFIRM``. This will cause the EC to open the
|
|
+ latch, after which the user can separate clipboard and base.
|
|
+
|
|
+ As this changes the latch state, a *latch-status* event
|
|
+ (``SDTX_EVENT_LATCH_STATUS``) will be sent once the latch has been opened
|
|
+ successfully. If the EC fails to open the latch, e.g. due to hardware
|
|
+ error or low battery, a latch-cancel event (``SDTX_EVENT_CANCEL``) will be
|
|
+ sent with the cancel reason indicating the specific failure.
|
|
+
|
|
+ If the latch is currently locked, the latch will automatically be
|
|
+ unlocked before it is opened.
|
|
+
|
|
+- Execute ``SDTX_IOCTL_LATCH_HEARTBEAT``. This will reset the internal timeout.
|
|
+ No other actions will be performed, i.e. the detachment process will neither
|
|
+ be completed nor canceled, and the EC will still be waiting for further
|
|
+ responses.
|
|
+
|
|
+- Execute ``SDTX_IOCTL_LATCH_CANCEL``. This will abort the detachment process,
|
|
+ similar to ``SDTX_IOCTL_LATCH_REQUEST``, described above, or the button
|
|
+ press, described below. A *generic request* event (``SDTX_EVENT_REQUEST``)
|
|
+ is sent in response to this. In contrast to those, however, this command
|
|
+ does not trigger a new detachment process if none is currently in
|
|
+ progress.
|
|
+
|
|
+- Do nothing. The detachment process eventually times out as described in
|
|
+ point 3.
|
|
+
|
|
+See :ref:`ioctls` for more details on these responses.
|
|
+
|
|
+It is important to note that, if the user presses the detach button at any
|
|
+point when a detachment operation is in progress (i.e. after the EC has
|
|
+sent the initial *detach-request* event (``SDTX_EVENT_REQUEST``) and before
|
|
+it received the corresponding response concluding the process), the
|
|
+detachment process is canceled on the EC-level and an identical event is
|
|
+being sent. Thus a *detach-request* event, by itself, does not signal the
|
|
+start of the detachment process.
|
|
+
|
|
+The detachment process may further be canceled by the EC due to hardware
|
|
+failures or a low clipboard battery. This is done via a cancel event
|
|
+(``SDTX_EVENT_CANCEL``) with the corresponding cancel reason.
|
|
+
|
|
+
|
|
+User-Space Interface Documentation
|
|
+==================================
|
|
+
|
|
+Error Codes and Status Values
|
|
+-----------------------------
|
|
+
|
|
+Error and status codes are divided into different categories, which can be
|
|
+used to determine if the status code is an error, and, if it is, the
|
|
+severity and type of that error. The current categories are:
|
|
+
|
|
+.. flat-table:: Overview of Status/Error Categories.
|
|
+ :widths: 2 1 3
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Value
|
|
+ - Short Description
|
|
+
|
|
+ * - ``STATUS``
|
|
+ - ``0x0000``
|
|
+ - Non-error status codes.
|
|
+
|
|
+ * - ``RUNTIME_ERROR``
|
|
+ - ``0x1000``
|
|
+ - Non-critical runtime errors.
|
|
+
|
|
+ * - ``HARDWARE_ERROR``
|
|
+ - ``0x2000``
|
|
+ - Critical hardware failures.
|
|
+
|
|
+ * - ``UNKNOWN``
|
|
+ - ``0xF000``
|
|
+ - Unknown error codes.
|
|
+
|
|
+Other categories are reserved for future use. The ``SDTX_CATEGORY()`` macro
|
|
+can be used to determine the category of any status value. The
|
|
+``SDTX_SUCCESS()`` macro can be used to check if the status value is a
|
|
+success value (``SDTX_CATEGORY_STATUS``) or if it indicates a failure.
|
|
+
|
|
+Unknown status or error codes sent by the EC are assigned to the ``UNKNOWN``
|
|
+category by the driver and may be implemented via their own code in the
|
|
+future.
|
|
+
|
|
+Currently used error codes are:
|
|
+
|
|
+.. flat-table:: Overview of Error Codes.
|
|
+ :widths: 2 1 1 3
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Category
|
|
+ - Value
|
|
+ - Short Description
|
|
+
|
|
+ * - ``SDTX_DETACH_NOT_FEASIBLE``
|
|
+ - ``RUNTIME``
|
|
+ - ``0x1001``
|
|
+ - Detachment not feasible due to low clipboard battery.
|
|
+
|
|
+ * - ``SDTX_DETACH_TIMEDOUT``
|
|
+ - ``RUNTIME``
|
|
+ - ``0x1002``
|
|
+ - Detachment process timed out while the latch was locked.
|
|
+
|
|
+ * - ``SDTX_ERR_FAILED_TO_OPEN``
|
|
+ - ``HARDWARE``
|
|
+ - ``0x2001``
|
|
+ - Failed to open latch.
|
|
+
|
|
+ * - ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``
|
|
+ - ``HARDWARE``
|
|
+ - ``0x2002``
|
|
+ - Failed to keep latch open.
|
|
+
|
|
+ * - ``SDTX_ERR_FAILED_TO_CLOSE``
|
|
+ - ``HARDWARE``
|
|
+ - ``0x2003``
|
|
+ - Failed to close latch.
|
|
+
|
|
+Other error codes are reserved for future use. Non-error status codes may
|
|
+overlap and are generally only unique within their use-case:
|
|
+
|
|
+.. flat-table:: Latch Status Codes.
|
|
+ :widths: 2 1 1 3
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Category
|
|
+ - Value
|
|
+ - Short Description
|
|
+
|
|
+ * - ``SDTX_LATCH_CLOSED``
|
|
+ - ``STATUS``
|
|
+ - ``0x0000``
|
|
+ - Latch is closed/has been closed.
|
|
+
|
|
+ * - ``SDTX_LATCH_OPENED``
|
|
+ - ``STATUS``
|
|
+ - ``0x0001``
|
|
+ - Latch is open/has been opened.
|
|
+
|
|
+.. flat-table:: Base State Codes.
|
|
+ :widths: 2 1 1 3
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Category
|
|
+ - Value
|
|
+ - Short Description
|
|
+
|
|
+ * - ``SDTX_BASE_DETACHED``
|
|
+ - ``STATUS``
|
|
+ - ``0x0000``
|
|
+ - Base has been detached/is not present.
|
|
+
|
|
+ * - ``SDTX_BASE_ATTACHED``
|
|
+ - ``STATUS``
|
|
+ - ``0x0001``
|
|
+ - Base has been attached/is present.
|
|
+
|
|
+Again, other codes are reserved for future use.
|
|
+
|
|
+.. _events:
|
|
+
|
|
+Events
|
|
+------
|
|
+
|
|
+Events can be received by reading from the device file. They are disabled by
|
|
+default and have to be enabled by executing ``SDTX_IOCTL_EVENTS_ENABLE``
|
|
+first. All events follow the layout prescribed by |sdtx_event|. Specific
|
|
+event types can be identified by their event code, described in
|
|
+|sdtx_event_code|. Note that other event codes are reserved for future use,
|
|
+thus an event parser must be able to handle any unknown/unsupported event
|
|
+types gracefully, by relying on the payload length given in the event header.
|
|
+
|
|
+Currently provided event types are:
|
|
+
|
|
+.. flat-table:: Overview of DTX events.
|
|
+ :widths: 2 1 1 3
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Code
|
|
+ - Payload
|
|
+ - Short Description
|
|
+
|
|
+ * - ``SDTX_EVENT_REQUEST``
|
|
+ - ``1``
|
|
+ - ``0`` bytes
|
|
+ - Detachment process initiated/aborted.
|
|
+
|
|
+ * - ``SDTX_EVENT_CANCEL``
|
|
+ - ``2``
|
|
+ - ``2`` bytes
|
|
+ - EC canceled detachment process.
|
|
+
|
|
+ * - ``SDTX_EVENT_BASE_CONNECTION``
|
|
+ - ``3``
|
|
+ - ``4`` bytes
|
|
+ - Base connection state changed.
|
|
+
|
|
+ * - ``SDTX_EVENT_LATCH_STATUS``
|
|
+ - ``4``
|
|
+ - ``2`` bytes
|
|
+ - Latch status changed.
|
|
+
|
|
+ * - ``SDTX_EVENT_DEVICE_MODE``
|
|
+ - ``5``
|
|
+ - ``2`` bytes
|
|
+ - Device mode changed.
|
|
+
|
|
+Individual events in more detail:
|
|
+
|
|
+``SDTX_EVENT_REQUEST``
|
|
+^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Sent when a detachment process is started or, if in progress, aborted by the
|
|
+user, either via a detach button press or a detach request
|
|
+(``SDTX_IOCTL_LATCH_REQUEST``) being sent from user-space.
|
|
+
|
|
+Does not have any payload.
|
|
+
|
|
+``SDTX_EVENT_CANCEL``
|
|
+^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Sent when a detachment process is canceled by the EC due to unfulfilled
|
|
+preconditions (e.g. clipboard battery too low to detach) or hardware
|
|
+failure. The reason for cancellation is given in the event payload detailed
|
|
+below and can be one of
|
|
+
|
|
+* ``SDTX_DETACH_TIMEDOUT``: Detachment timed out while the latch was locked.
|
|
+ The latch has neither been opened nor unlocked.
|
|
+
|
|
+* ``SDTX_DETACH_NOT_FEASIBLE``: Detachment not feasible due to low clipboard
|
|
+ battery.
|
|
+
|
|
+* ``SDTX_ERR_FAILED_TO_OPEN``: Could not open the latch (hardware failure).
|
|
+
|
|
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``: Could not keep the latch open (hardware
|
|
+ failure).
|
|
+
|
|
+* ``SDTX_ERR_FAILED_TO_CLOSE``: Could not close the latch (hardware failure).
|
|
+
|
|
+Other error codes in this context are reserved for future use.
|
|
+
|
|
+These codes can be classified via the ``SDTX_CATEGORY()`` macro to discern
|
|
+between critical hardware errors (``SDTX_CATEGORY_HARDWARE_ERROR``) or
|
|
+runtime errors (``SDTX_CATEGORY_RUNTIME_ERROR``), the latter of which may
|
|
+happen during normal operation if certain preconditions for detachment are
|
|
+not given.
|
|
+
|
|
+.. flat-table:: Detachment Cancel Event Payload
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - ``reason``
|
|
+ - |__u16|
|
|
+ - Reason for cancellation.
|
|
+
|
|
+``SDTX_EVENT_BASE_CONNECTION``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Sent when the base connection state has changed, i.e. when the base has been
|
|
+attached, detached, or detachment has become infeasible due to low clipboard
|
|
+battery. The new state and, if a base is connected, ID of the base is
|
|
+provided as payload of type |sdtx_base_info| with its layout presented
|
|
+below:
|
|
+
|
|
+.. flat-table:: Base-Connection-Change Event Payload
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - ``state``
|
|
+ - |__u16|
|
|
+ - Base connection state.
|
|
+
|
|
+ * - ``base_id``
|
|
+ - |__u16|
|
|
+ - Type of base connected (zero if none).
|
|
+
|
|
+Possible values for ``state`` are:
|
|
+
|
|
+* ``SDTX_BASE_DETACHED``,
|
|
+* ``SDTX_BASE_ATTACHED``, and
|
|
+* ``SDTX_DETACH_NOT_FEASIBLE``.
|
|
+
|
|
+Other values are reserved for future use.
|
|
+
|
|
+``SDTX_EVENT_LATCH_STATUS``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Sent when the latch status has changed, i.e. when the latch has been opened,
|
|
+closed, or an error occurred. The current status is provided as payload:
|
|
+
|
|
+.. flat-table:: Latch-Status-Change Event Payload
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - ``status``
|
|
+ - |__u16|
|
|
+ - Latch status.
|
|
+
|
|
+Possible values for ``status`` are:
|
|
+
|
|
+* ``SDTX_LATCH_CLOSED``,
|
|
+* ``SDTX_LATCH_OPENED``,
|
|
+* ``SDTX_ERR_FAILED_TO_OPEN``,
|
|
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
|
|
+* ``SDTX_ERR_FAILED_TO_CLOSE``.
|
|
+
|
|
+Other values are reserved for future use.
|
|
+
|
|
+``SDTX_EVENT_DEVICE_MODE``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Sent when the device mode has changed. The new device mode is provided as
|
|
+payload:
|
|
+
|
|
+.. flat-table:: Device-Mode-Change Event Payload
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - ``mode``
|
|
+ - |__u16|
|
|
+ - Device operation mode.
|
|
+
|
|
+Possible values for ``mode`` are:
|
|
+
|
|
+* ``SDTX_DEVICE_MODE_TABLET``,
|
|
+* ``SDTX_DEVICE_MODE_LAPTOP``, and
|
|
+* ``SDTX_DEVICE_MODE_STUDIO``.
|
|
+
|
|
+Other values are reserved for future use.
|
|
+
|
|
+.. _ioctls:
|
|
+
|
|
+IOCTLs
|
|
+------
|
|
+
|
|
+The following IOCTLs are provided:
|
|
+
|
|
+.. flat-table:: Overview of DTX IOCTLs
|
|
+ :widths: 1 1 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Type
|
|
+ - Number
|
|
+ - Direction
|
|
+ - Name
|
|
+ - Description
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x21``
|
|
+ - ``-``
|
|
+ - ``EVENTS_ENABLE``
|
|
+ - Enable events for the current file descriptor.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x22``
|
|
+ - ``-``
|
|
+ - ``EVENTS_DISABLE``
|
|
+ - Disable events for the current file descriptor.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x23``
|
|
+ - ``-``
|
|
+ - ``LATCH_LOCK``
|
|
+ - Lock the latch.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x24``
|
|
+ - ``-``
|
|
+ - ``LATCH_UNLOCK``
|
|
+ - Unlock the latch.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x25``
|
|
+ - ``-``
|
|
+ - ``LATCH_REQUEST``
|
|
+ - Request clipboard detachment.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x26``
|
|
+ - ``-``
|
|
+ - ``LATCH_CONFIRM``
|
|
+ - Confirm clipboard detachment request.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x27``
|
|
+ - ``-``
|
|
+ - ``LATCH_HEARTBEAT``
|
|
+ - Send heartbeat signal to EC.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x28``
|
|
+ - ``-``
|
|
+ - ``LATCH_CANCEL``
|
|
+ - Cancel detachment process.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x29``
|
|
+ - ``R``
|
|
+ - ``GET_BASE_INFO``
|
|
+ - Get current base/connection information.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x2A``
|
|
+ - ``R``
|
|
+ - ``GET_DEVICE_MODE``
|
|
+ - Get current device operation mode.
|
|
+
|
|
+ * - ``0xA5``
|
|
+ - ``0x2B``
|
|
+ - ``R``
|
|
+ - ``GET_LATCH_STATUS``
|
|
+ - Get current device latch status.
|
|
+
|
|
+``SDTX_IOCTL_EVENTS_ENABLE``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x21)``.
|
|
+
|
|
+Enable events for the current file descriptor. Events can be obtained by
|
|
+reading from the device, if enabled. Events are disabled by default.
|
|
+
|
|
+``SDTX_IOCTL_EVENTS_DISABLE``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x22)``.
|
|
+
|
|
+Disable events for the current file descriptor. Events can be obtained by
|
|
+reading from the device, if enabled. Events are disabled by default.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_LOCK``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x23)``.
|
|
+
|
|
+Locks the latch, causing the detachment procedure to abort without opening
|
|
+the latch on timeout. The latch is unlocked by default. This command will be
|
|
+silently ignored if the latch is already locked.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_UNLOCK``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x24)``.
|
|
+
|
|
+Unlocks the latch, causing the detachment procedure to open the latch on
|
|
+timeout. The latch is unlocked by default. This command will not open the
|
|
+latch when sent during an ongoing detachment process. It will be silently
|
|
+ignored if the latch is already unlocked.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_REQUEST``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x25)``.
|
|
+
|
|
+Generic latch request. Behavior depends on the context: If no
|
|
+detachment-process is active, detachment is requested. Otherwise the
|
|
+currently active detachment-process will be aborted.
|
|
+
|
|
+If a detachment process is canceled by this operation, a generic detachment
|
|
+request event (``SDTX_EVENT_REQUEST``) will be sent.
|
|
+
|
|
+This essentially behaves the same as a detachment button press.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_CONFIRM``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x26)``.
|
|
+
|
|
+Acknowledges and confirms a latch request. If sent during an ongoing
|
|
+detachment process, this command causes the latch to be opened immediately.
|
|
+The latch will also be opened if it has been locked. In this case, the latch
|
|
+lock is reset to the unlocked state.
|
|
+
|
|
+This command will be silently ignored if there is currently no detachment
|
|
+procedure in progress.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_HEARTBEAT``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x27)``.
|
|
+
|
|
+Sends a heartbeat, essentially resetting the detachment timeout. This
|
|
+command can be used to keep the detachment process alive while work required
|
|
+for the detachment to succeed is still in progress.
|
|
+
|
|
+This command will be silently ignored if there is currently no detachment
|
|
+procedure in progress.
|
|
+
|
|
+``SDTX_IOCTL_LATCH_CANCEL``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IO(0xA5, 0x28)``.
|
|
+
|
|
+Cancels detachment in progress (if any). If a detachment process is canceled
|
|
+by this operation, a generic detachment request event
|
|
+(``SDTX_EVENT_REQUEST``) will be sent.
|
|
+
|
|
+This command will be silently ignored if there is currently no detachment
|
|
+procedure in progress.
|
|
+
|
|
+``SDTX_IOCTL_GET_BASE_INFO``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IOR(0xA5, 0x29, struct sdtx_base_info)``.
|
|
+
|
|
+Get the current base connection state (i.e. attached/detached) and the type
|
|
+of the base connected to the clipboard. This command essentially provides
|
|
+a way to query the information provided by the base connection change event
|
|
+(``SDTX_EVENT_BASE_CONNECTION``).
|
|
+
|
|
+Possible values for ``struct sdtx_base_info.state`` are:
|
|
+
|
|
+* ``SDTX_BASE_DETACHED``,
|
|
+* ``SDTX_BASE_ATTACHED``, and
|
|
+* ``SDTX_DETACH_NOT_FEASIBLE``.
|
|
+
|
|
+Other values are reserved for future use.
|
|
+
|
|
+``SDTX_IOCTL_GET_DEVICE_MODE``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IOR(0xA5, 0x2A, __u16)``.
|
|
+
|
|
+Returns the device operation mode, indicating if and how the base is
|
|
+attached to the clipboard. This command essentially provides a way to
|
|
+query the information provided by the device mode change event
|
|
+(``SDTX_EVENT_DEVICE_MODE``).
|
|
+
|
|
+Returned values are:
|
|
+
|
|
+* ``SDTX_DEVICE_MODE_LAPTOP``
|
|
+* ``SDTX_DEVICE_MODE_TABLET``
|
|
+* ``SDTX_DEVICE_MODE_STUDIO``
|
|
+
|
|
+See |sdtx_device_mode| for details. Other values are reserved for future
|
|
+use.
|
|
+
|
|
+
|
|
+``SDTX_IOCTL_GET_LATCH_STATUS``
|
|
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
+
|
|
+Defined as ``_IOR(0xA5, 0x2B, __u16)``.
|
|
+
|
|
+Get the current latch status or (presumably) the last error encountered when
|
|
+trying to open/close the latch. This command essentially provides a way
|
|
+to query the information provided by the latch status change event
|
|
+(``SDTX_EVENT_LATCH_STATUS``).
|
|
+
|
|
+Returned values are:
|
|
+
|
|
+* ``SDTX_LATCH_CLOSED``,
|
|
+* ``SDTX_LATCH_OPENED``,
|
|
+* ``SDTX_ERR_FAILED_TO_OPEN``,
|
|
+* ``SDTX_ERR_FAILED_TO_REMAIN_OPEN``, and
|
|
+* ``SDTX_ERR_FAILED_TO_CLOSE``.
|
|
+
|
|
+Other values are reserved for future use.
|
|
+
|
|
+A Note on Base IDs
|
|
+------------------
|
|
+
|
|
+Base types/IDs provided via ``SDTX_EVENT_BASE_CONNECTION`` or
|
|
+``SDTX_IOCTL_GET_BASE_INFO`` are directly forwarded from the EC in the
|
|
+lower byte of the combined |__u16| value, with the driver storing the EC
|
|
+type from which this ID comes in the high byte (without this, base IDs over
|
|
+different types of ECs may be overlapping).
|
|
+
|
|
+The ``SDTX_DEVICE_TYPE()`` macro can be used to determine the EC device
|
|
+type. This can be one of
|
|
+
|
|
+* ``SDTX_DEVICE_TYPE_HID``, for Surface Aggregator Module over HID, and
|
|
+
|
|
+* ``SDTX_DEVICE_TYPE_SSH``, for Surface Aggregator Module over Surface Serial
|
|
+ Hub.
|
|
+
|
|
+Note that currently only the ``SSH`` type EC is supported, however ``HID``
|
|
+type is reserved for future use.
|
|
+
|
|
+Structures and Enums
|
|
+--------------------
|
|
+
|
|
+.. kernel-doc:: include/uapi/linux/surface_aggregator/dtx.h
|
|
diff --git a/Documentation/driver-api/surface_aggregator/clients/index.rst b/Documentation/driver-api/surface_aggregator/clients/index.rst
|
|
new file mode 100644
|
|
index 000000000000..98ea9946b8a2
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/clients/index.rst
|
|
@@ -0,0 +1,22 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+===========================
|
|
+Client Driver Documentation
|
|
+===========================
|
|
+
|
|
+This is the documentation for client drivers themselves. Refer to
|
|
+:doc:`../client` for documentation on how to write client drivers.
|
|
+
|
|
+.. toctree::
|
|
+ :maxdepth: 1
|
|
+
|
|
+ cdev
|
|
+ dtx
|
|
+ san
|
|
+
|
|
+.. only:: subproject and html
|
|
+
|
|
+ Indices
|
|
+ =======
|
|
+
|
|
+ * :ref:`genindex`
|
|
diff --git a/Documentation/driver-api/surface_aggregator/clients/san.rst b/Documentation/driver-api/surface_aggregator/clients/san.rst
|
|
new file mode 100644
|
|
index 000000000000..1bf830ad367d
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/clients/san.rst
|
|
@@ -0,0 +1,44 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |san_client_link| replace:: :c:func:`san_client_link`
|
|
+.. |san_dgpu_notifier_register| replace:: :c:func:`san_dgpu_notifier_register`
|
|
+.. |san_dgpu_notifier_unregister| replace:: :c:func:`san_dgpu_notifier_unregister`
|
|
+
|
|
+===================
|
|
+Surface ACPI Notify
|
|
+===================
|
|
+
|
|
+The Surface ACPI Notify (SAN) device provides the bridge between ACPI and
|
|
+SAM controller. Specifically, ACPI code can execute requests and handle
|
|
+battery and thermal events via this interface. In addition to this, events
|
|
+relating to the discrete GPU (dGPU) of the Surface Book 2 can be sent from
|
|
+ACPI code (note: the Surface Book 3 uses a different method for this). The
|
|
+only currently known event sent via this interface is a dGPU power-on
|
|
+notification. While this driver handles the former part internally, it only
|
|
+relays the dGPU events to any other driver interested via its public API and
|
|
+does not handle them.
|
|
+
|
|
+The public interface of this driver is split into two parts: Client
|
|
+registration and notifier-block registration.
|
|
+
|
|
+A client to the SAN interface can be linked as consumer to the SAN device
|
|
+via |san_client_link|. This can be used to ensure that a client
|
|
+receiving dGPU events does not miss any events due to the SAN interface not
|
|
+being set up as this forces the client driver to unbind once the SAN driver
|
|
+is unbound.
|
|
+
|
|
+Notifier-blocks can be registered by any device for as long as the module is
|
|
+loaded, regardless of being linked as client or not. Registration is done
|
|
+with |san_dgpu_notifier_register|. If the notifier is not needed any more, it
|
|
+should be unregistered via |san_dgpu_notifier_unregister|.
|
|
+
|
|
+Consult the API documentation below for more details.
|
|
+
|
|
+
|
|
+API Documentation
|
|
+=================
|
|
+
|
|
+.. kernel-doc:: include/linux/surface_acpi_notify.h
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/clients/surface_acpi_notify.c
|
|
+ :export:
|
|
diff --git a/Documentation/driver-api/surface_aggregator/index.rst b/Documentation/driver-api/surface_aggregator/index.rst
|
|
new file mode 100644
|
|
index 000000000000..6f3e1094904d
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/index.rst
|
|
@@ -0,0 +1,21 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+=======================================
|
|
+Surface System Aggregator Module (SSAM)
|
|
+=======================================
|
|
+
|
|
+.. toctree::
|
|
+ :maxdepth: 2
|
|
+
|
|
+ overview
|
|
+ client
|
|
+ clients/index
|
|
+ ssh
|
|
+ internal
|
|
+
|
|
+.. only:: subproject and html
|
|
+
|
|
+ Indices
|
|
+ =======
|
|
+
|
|
+ * :ref:`genindex`
|
|
diff --git a/Documentation/driver-api/surface_aggregator/internal-api.rst b/Documentation/driver-api/surface_aggregator/internal-api.rst
|
|
new file mode 100644
|
|
index 000000000000..db6a70119f49
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/internal-api.rst
|
|
@@ -0,0 +1,67 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+==========================
|
|
+Internal API Documentation
|
|
+==========================
|
|
+
|
|
+.. contents::
|
|
+ :depth: 2
|
|
+
|
|
+
|
|
+Packet Transport Layer
|
|
+======================
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_parser.h
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_parser.c
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_msgb.h
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.h
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_packet_layer.c
|
|
+ :internal:
|
|
+
|
|
+
|
|
+Request Transport Layer
|
|
+=======================
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_request_layer.h
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/ssh_request_layer.c
|
|
+ :internal:
|
|
+
|
|
+
|
|
+Controller
|
|
+==========
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/controller.h
|
|
+ :internal:
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/controller.c
|
|
+ :internal:
|
|
+
|
|
+
|
|
+Client Device Bus
|
|
+=================
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/bus.c
|
|
+ :internal:
|
|
+
|
|
+
|
|
+Core
|
|
+====
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/core.c
|
|
+ :internal:
|
|
+
|
|
+
|
|
+Trace Helpers
|
|
+=============
|
|
+
|
|
+.. kernel-doc:: drivers/misc/surface_aggregator/trace.h
|
|
diff --git a/Documentation/driver-api/surface_aggregator/internal.rst b/Documentation/driver-api/surface_aggregator/internal.rst
|
|
new file mode 100644
|
|
index 000000000000..72704734982a
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/internal.rst
|
|
@@ -0,0 +1,577 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |ssh_ptl| replace:: :c:type:`struct ssh_ptl <ssh_ptl>`
|
|
+.. |ssh_ptl_submit| replace:: :c:func:`ssh_ptl_submit`
|
|
+.. |ssh_ptl_cancel| replace:: :c:func:`ssh_ptl_cancel`
|
|
+.. |ssh_ptl_shutdown| replace:: :c:func:`ssh_ptl_shutdown`
|
|
+.. |ssh_ptl_rx_rcvbuf| replace:: :c:func:`ssh_ptl_rx_rcvbuf`
|
|
+.. |ssh_rtl| replace:: :c:type:`struct ssh_rtl <ssh_rtl>`
|
|
+.. |ssh_rtl_submit| replace:: :c:func:`ssh_rtl_submit`
|
|
+.. |ssh_rtl_cancel| replace:: :c:func:`ssh_rtl_cancel`
|
|
+.. |ssh_rtl_shutdown| replace:: :c:func:`ssh_rtl_shutdown`
|
|
+.. |ssh_packet| replace:: :c:type:`struct ssh_packet <ssh_packet>`
|
|
+.. |ssh_packet_get| replace:: :c:func:`ssh_packet_get`
|
|
+.. |ssh_packet_put| replace:: :c:func:`ssh_packet_put`
|
|
+.. |ssh_packet_ops| replace:: :c:type:`struct ssh_packet_ops <ssh_packet_ops>`
|
|
+.. |ssh_packet_base_priority| replace:: :c:type:`enum ssh_packet_base_priority <ssh_packet_base_priority>`
|
|
+.. |ssh_packet_flags| replace:: :c:type:`enum ssh_packet_flags <ssh_packet_flags>`
|
|
+.. |SSH_PACKET_PRIORITY| replace:: :c:func:`SSH_PACKET_PRIORITY`
|
|
+.. |ssh_frame| replace:: :c:type:`struct ssh_frame <ssh_frame>`
|
|
+.. |ssh_command| replace:: :c:type:`struct ssh_command <ssh_command>`
|
|
+.. |ssh_request| replace:: :c:type:`struct ssh_request <ssh_request>`
|
|
+.. |ssh_request_get| replace:: :c:func:`ssh_request_get`
|
|
+.. |ssh_request_put| replace:: :c:func:`ssh_request_put`
|
|
+.. |ssh_request_ops| replace:: :c:type:`struct ssh_request_ops <ssh_request_ops>`
|
|
+.. |ssh_request_init| replace:: :c:func:`ssh_request_init`
|
|
+.. |ssh_request_flags| replace:: :c:type:`enum ssh_request_flags <ssh_request_flags>`
|
|
+.. |ssam_controller| replace:: :c:type:`struct ssam_controller <ssam_controller>`
|
|
+.. |ssam_device| replace:: :c:type:`struct ssam_device <ssam_device>`
|
|
+.. |ssam_device_driver| replace:: :c:type:`struct ssam_device_driver <ssam_device_driver>`
|
|
+.. |ssam_client_bind| replace:: :c:func:`ssam_client_bind`
|
|
+.. |ssam_client_link| replace:: :c:func:`ssam_client_link`
|
|
+.. |ssam_request_sync| replace:: :c:type:`struct ssam_request_sync <ssam_request_sync>`
|
|
+.. |ssam_event_registry| replace:: :c:type:`struct ssam_event_registry <ssam_event_registry>`
|
|
+.. |ssam_event_id| replace:: :c:type:`struct ssam_event_id <ssam_event_id>`
|
|
+.. |ssam_nf| replace:: :c:type:`struct ssam_nf <ssam_nf>`
|
|
+.. |ssam_nf_refcount_inc| replace:: :c:func:`ssam_nf_refcount_inc`
|
|
+.. |ssam_nf_refcount_dec| replace:: :c:func:`ssam_nf_refcount_dec`
|
|
+.. |ssam_notifier_register| replace:: :c:func:`ssam_notifier_register`
|
|
+.. |ssam_notifier_unregister| replace:: :c:func:`ssam_notifier_unregister`
|
|
+.. |ssam_cplt| replace:: :c:type:`struct ssam_cplt <ssam_cplt>`
|
|
+.. |ssam_event_queue| replace:: :c:type:`struct ssam_event_queue <ssam_event_queue>`
|
|
+.. |ssam_request_sync_submit| replace:: :c:func:`ssam_request_sync_submit`
|
|
+
|
|
+=====================
|
|
+Core Driver Internals
|
|
+=====================
|
|
+
|
|
+Architectural overview of the Surface System Aggregator Module (SSAM) core
|
|
+and Surface Serial Hub (SSH) driver. For the API documentation, refer to:
|
|
+
|
|
+.. toctree::
|
|
+ :maxdepth: 2
|
|
+
|
|
+ internal-api
|
|
+
|
|
+
|
|
+Overview
|
|
+========
|
|
+
|
|
+The SSAM core implementation is structured in layers, somewhat following the
|
|
+SSH protocol structure:
|
|
+
|
|
+Lower-level packet transport is implemented in the *packet transport layer
|
|
+(PTL)*, directly building on top of the serial device (serdev)
|
|
+infrastructure of the kernel. As the name indicates, this layer deals with
|
|
+the packet transport logic and handles things like packet validation, packet
|
|
+acknowledgment (ACKing), packet (retransmission) timeouts, and relaying
|
|
+packet payloads to higher-level layers.
|
|
+
|
|
+Above this sits the *request transport layer (RTL)*. This layer is centered
|
|
+around command-type packet payloads, i.e. requests (sent from host to EC),
|
|
+responses of the EC to those requests, and events (sent from EC to host).
|
|
+It, specifically, distinguishes events from request responses, matches
|
|
+responses to their corresponding requests, and implements request timeouts.
|
|
+
|
|
+The *controller* layer is building on top of this and essentially decides
|
|
+how request responses and, especially, events are dealt with. It provides an
|
|
+event notifier system, handles event activation/deactivation, provides a
|
|
+workqueue for event and asynchronous request completion, and also manages
|
|
+the message counters required for building command messages (``SEQ``,
|
|
+``RQID``). This layer basically provides a fundamental interface to the SAM
|
|
+EC for use in other kernel drivers.
|
|
+
|
|
+While the controller layer already provides an interface for other kernel
|
|
+drivers, the client *bus* extends this interface to provide support for
|
|
+native SSAM devices, i.e. devices that are not defined in ACPI and not
|
|
+implemented as platform devices, via |ssam_device| and |ssam_device_driver|
|
|
+which simplify management of client devices and client drivers.
|
|
+
|
|
+Refer to :doc:`client` for documentation regarding the client device/driver
|
|
+API and interface options for other kernel drivers. It is recommended to
|
|
+familiarize oneself with that chapter and the :doc:`ssh` before continuing
|
|
+with the architectural overview below.
|
|
+
|
|
+
|
|
+Packet Transport Layer
|
|
+======================
|
|
+
|
|
+The packet transport layer is represented via |ssh_ptl| and is structured
|
|
+around the following key concepts:
|
|
+
|
|
+Packets
|
|
+-------
|
|
+
|
|
+Packets are the fundamental transmission unit of the SSH protocol. They are
|
|
+managed by the packet transport layer, which is essentially the lowest layer
|
|
+of the driver and is built upon by other components of the SSAM core.
|
|
+Packets to be transmitted by the SSAM core are represented via |ssh_packet|
|
|
+(in contrast, packets received by the core do not have any specific
|
|
+structure and are managed entirely via the raw |ssh_frame|).
|
|
+
|
|
+This structure contains the required fields to manage the packet inside the
|
|
+transport layer, as well as a reference to the buffer containing the data to
|
|
+be transmitted (i.e. the message wrapped in |ssh_frame|). Most notably, it
|
|
+contains an internal reference count, which is used for managing its
|
|
+lifetime (accessible via |ssh_packet_get| and |ssh_packet_put|). When this
|
|
+counter reaches zero, the ``release()`` callback provided to the packet via
|
|
+its |ssh_packet_ops| reference is executed, which may then deallocate the
|
|
+packet or its enclosing structure (e.g. |ssh_request|).
|
|
+
|
|
+In addition to the ``release`` callback, the |ssh_packet_ops| reference also
|
|
+provides a ``complete()`` callback, which is run once the packet has been
|
|
+completed and provides the status of this completion, i.e. zero on success
|
|
+or a negative errno value in case of an error. Once the packet has been
|
|
+submitted to the packet transport layer, the ``complete()`` callback is
|
|
+always guaranteed to be executed before the ``release()`` callback, i.e. the
|
|
+packet will always be completed, either successfully, with an error, or due
|
|
+to cancellation, before it will be released.
|
|
+
|
|
+The state of a packet is managed via its ``state`` flags
|
|
+(|ssh_packet_flags|), which also contains the packet type. In particular,
|
|
+the following bits are noteworthy:
|
|
+
|
|
+* ``SSH_PACKET_SF_LOCKED_BIT``: This bit is set when completion, either
|
|
+ through error or success, is imminent. It indicates that no further
|
|
+ references of the packet should be taken and any existing references
|
|
+ should be dropped as soon as possible. The process setting this bit is
|
|
+ responsible for removing any references to this packet from the packet
|
|
+ queue and pending set.
|
|
+
|
|
+* ``SSH_PACKET_SF_COMPLETED_BIT``: This bit is set by the process running the
|
|
+ ``complete()`` callback and is used to ensure that this callback only runs
|
|
+ once.
|
|
+
|
|
+* ``SSH_PACKET_SF_QUEUED_BIT``: This bit is set when the packet is queued on
|
|
+ the packet queue and cleared when it is dequeued.
|
|
+
|
|
+* ``SSH_PACKET_SF_PENDING_BIT``: This bit is set when the packet is added to
|
|
+ the pending set and cleared when it is removed from it.
|
|
+
|
|
+Packet Queue
|
|
+------------
|
|
+
|
|
+The packet queue is the first of the two fundamental collections in the
|
|
+packet transport layer. It is a priority queue, with priority of the
|
|
+respective packets based on the packet type (major) and number of tries
|
|
+(minor). See |SSH_PACKET_PRIORITY| for more details on the priority value.
|
|
+
|
|
+All packets to be transmitted by the transport layer must be submitted to
|
|
+this queue via |ssh_ptl_submit|. Note that this includes control packets
|
|
+sent by the transport layer itself. Internally, data packets can be
|
|
+re-submitted to this queue due to timeouts or NAK packets sent by the EC.
|
|
+
|
|
+Pending Set
|
|
+-----------
|
|
+
|
|
+The pending set is the second of the two fundamental collections in the
|
|
+packet transport layer. It stores references to packets that have already
|
|
+been transmitted, but wait for acknowledgment (e.g. the corresponding ACK
|
|
+packet) by the EC.
|
|
+
|
|
+Note that a packet may both be pending and queued if it has been
|
|
+re-submitted due to a packet acknowledgment timeout or NAK. On such a
|
|
+re-submission, packets are not removed from the pending set.
|
|
+
|
|
+Transmitter Thread
|
|
+------------------
|
|
+
|
|
+The transmitter thread is responsible for most of the actual work regarding
|
|
+packet transmission. In each iteration, it (waits for and) checks if the
|
|
+next packet on the queue (if any) can be transmitted and, if so, removes it
|
|
+from the queue and increments its counter for the number of transmission
|
|
+attempts, i.e. tries. If the packet is sequenced, i.e. requires an ACK by
|
|
+the EC, the packet is added to the pending set. Next, the packet's data is
|
|
+submitted to the serdev subsystem. In case of an error or timeout during
|
|
+this submission, the packet is completed by the transmitter thread with the
|
|
+status value of the callback set accordingly. In case the packet is
|
|
+unsequenced, i.e. does not require an ACK by the EC, the packet is completed
|
|
+with success on the transmitter thread.
|
|
+
|
|
+Transmission of sequenced packets is limited by the number of concurrently
|
|
+pending packets, i.e. a limit on how many packets may be waiting for an ACK
|
|
+from the EC in parallel. This limit is currently set to one (see :doc:`ssh`
|
|
+for the reasoning behind this). Control packets (i.e. ACK and NAK) can
|
|
+always be transmitted.
|
|
+
|
|
+Receiver Thread
|
|
+---------------
|
|
+
|
|
+Any data received from the EC is put into a FIFO buffer for further
|
|
+processing. This processing happens on the receiver thread. The receiver
|
|
+thread parses and validates the received message into its |ssh_frame| and
|
|
+corresponding payload. It prepares and submits the necessary ACK (and on
|
|
+validation error or invalid data NAK) packets for the received messages.
|
|
+
|
|
+This thread also handles further processing, such as matching ACK messages
|
|
+to the corresponding pending packet (via sequence ID) and completing it, as
|
|
+well as initiating re-submission of all currently pending packets on
|
|
+receival of a NAK message (re-submission in case of a NAK is similar to
|
|
+re-submission due to timeout, see below for more details on that). Note that
|
|
+the successful completion of a sequenced packet will always run on the
|
|
+receiver thread (whereas any failure-indicating completion will run on the
|
|
+process where the failure occurred).
|
|
+
|
|
+Any payload data is forwarded via a callback to the next upper layer, i.e.
|
|
+the request transport layer.
|
|
+
|
|
+Timeout Reaper
|
|
+--------------
|
|
+
|
|
+The packet acknowledgment timeout is a per-packet timeout for sequenced
|
|
+packets, started when the respective packet begins (re-)transmission (i.e.
|
|
+this timeout is armed once per transmission attempt on the transmitter
|
|
+thread). It is used to trigger re-submission or, when the number of tries
|
|
+has been exceeded, cancellation of the packet in question.
|
|
+
|
|
+This timeout is handled via a dedicated reaper task, which is essentially a
|
|
+work item (re-)scheduled to run when the next packet is set to time out. The
|
|
+work item then checks the set of pending packets for any packets that have
|
|
+exceeded the timeout and, if there are any remaining packets, re-schedules
|
|
+itself to the next appropriate point in time.
|
|
+
|
|
+If a timeout has been detected by the reaper, the packet will either be
|
|
+re-submitted if it still has some remaining tries left, or completed with
|
|
+``-ETIMEDOUT`` as status if not. Note that re-submission, in this case and
|
|
+triggered by receival of a NAK, means that the packet is added to the queue
|
|
+with a now incremented number of tries, yielding a higher priority. The
|
|
+timeout for the packet will be disabled until the next transmission attempt
|
|
+and the packet remains on the pending set.
|
|
+
|
|
+Note that due to transmission and packet acknowledgment timeouts, the packet
|
|
+transport layer is always guaranteed to make progress, if only through
|
|
+timing out packets, and will never fully block.
|
|
+
|
|
+Concurrency and Locking
|
|
+-----------------------
|
|
+
|
|
+There are two main locks in the packet transport layer: One guarding access
|
|
+to the packet queue and one guarding access to the pending set. These
|
|
+collections may only be accessed and modified under the respective lock. If
|
|
+access to both collections is needed, the pending lock must be acquired
|
|
+before the queue lock to avoid deadlocks.
|
|
+
|
|
+In addition to guarding the collections, after initial packet submission
|
|
+certain packet fields may only be accessed under one of the locks.
|
|
+Specifically, the packet priority must only be accessed while holding the
|
|
+queue lock and the packet timestamp must only be accessed while holding the
|
|
+pending lock.
|
|
+
|
|
+Other parts of the packet transport layer are guarded independently. State
|
|
+flags are managed by atomic bit operations and, if necessary, memory
|
|
+barriers. Modifications to the timeout reaper work item and expiration date
|
|
+are guarded by their own lock.
|
|
+
|
|
+The reference of the packet to the packet transport layer (``ptl``) is
|
|
+somewhat special. It is either set when the upper layer request is submitted
|
|
+or, if there is none, when the packet is first submitted. After it is set,
|
|
+it will not change its value. Functions that may run concurrently with
|
|
+submission, i.e. cancellation, can not rely on the ``ptl`` reference to be
|
|
+set. Access to it in these functions is guarded by ``READ_ONCE()``, whereas
|
|
+setting ``ptl`` is equally guarded with ``WRITE_ONCE()`` for symmetry.
|
|
+
|
|
+Some packet fields may be read outside of the respective locks guarding
|
|
+them, specifically priority and state for tracing. In those cases, proper
|
|
+access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such
|
|
+read-only access is only allowed when stale values are not critical.
|
|
+
|
|
+With respect to the interface for higher layers, packet submission
|
|
+(|ssh_ptl_submit|), packet cancellation (|ssh_ptl_cancel|), data receival
|
|
+(|ssh_ptl_rx_rcvbuf|), and layer shutdown (|ssh_ptl_shutdown|) may always be
|
|
+executed concurrently with respect to each other. Note that packet
|
|
+submission may not run concurrently with itself for the same packet.
|
|
+Equally, shutdown and data receival may also not run concurrently with
|
|
+themselves (but may run concurrently with each other).
|
|
+
|
|
+
|
|
+Request Transport Layer
|
|
+=======================
|
|
+
|
|
+The request transport layer is represented via |ssh_rtl| and builds on top
|
|
+of the packet transport layer. It deals with requests, i.e. SSH packets sent
|
|
+by the host containing a |ssh_command| as frame payload. This layer
|
|
+separates responses to requests from events, which are also sent by the EC
|
|
+via a |ssh_command| payload. While responses are handled in this layer,
|
|
+events are relayed to the next upper layer, i.e. the controller layer, via
|
|
+the corresponding callback. The request transport layer is structured around
|
|
+the following key concepts:
|
|
+
|
|
+Request
|
|
+-------
|
|
+
|
|
+Requests are packets with a command-type payload, sent from host to EC to
|
|
+query data from or trigger an action on it (or both simultaneously). They
|
|
+are represented by |ssh_request|, wrapping the underlying |ssh_packet|
|
|
+storing its message data (i.e. SSH frame with command payload). Note that
|
|
+all top-level representations, e.g. |ssam_request_sync|, are built upon this
|
|
+struct.
|
|
+
|
|
+As |ssh_request| extends |ssh_packet|, its lifetime is also managed by the
|
|
+reference counter inside the packet struct (which can be accessed via
|
|
+|ssh_request_get| and |ssh_request_put|). Once the counter reaches zero, the
|
|
+``release()`` callback of the |ssh_request_ops| reference of the request is
|
|
+called.
|
|
+
|
|
+Requests can have an optional response that is equally sent via a SSH
|
|
+message with command-type payload (from EC to host). The party constructing
|
|
+the request must know if a response is expected and mark this in the request
|
|
+flags provided to |ssh_request_init|, so that the request transport layer
|
|
+can wait for this response.
|
|
+
|
|
+Similar to |ssh_packet|, |ssh_request| also has a ``complete()`` callback
|
|
+provided via its request ops reference and is guaranteed to be completed
|
|
+before it is released once it has been submitted to the request transport
|
|
+layer via |ssh_rtl_submit|. For a request without a response, successful
|
|
+completion will occur once the underlying packet has been successfully
|
|
+transmitted by the packet transport layer (i.e. from within the packet
|
|
+completion callback). For a request with response, successful completion
|
|
+will occur once the response has been received and matched to the request
|
|
+via its request ID (which happens on the packet layer's data-received
|
|
+callback running on the receiver thread). If the request is completed with
|
|
+an error, the status value will be set to the corresponding (negative) errno
|
|
+value.
|
|
+
|
|
+The state of a request is again managed via its ``state`` flags
|
|
+(|ssh_request_flags|), which also encode the request type. In particular,
|
|
+the following bits are noteworthy:
|
|
+
|
|
+* ``SSH_REQUEST_SF_LOCKED_BIT``: This bit is set when completion, either
|
|
+ through error or success, is imminent. It indicates that no further
|
|
+ references of the request should be taken and any existing references
|
|
+ should be dropped as soon as possible. The process setting this bit is
|
|
+ responsible for removing any references to this request from the request
|
|
+ queue and pending set.
|
|
+
|
|
+* ``SSH_REQUEST_SF_COMPLETED_BIT``: This bit is set by the process running the
|
|
+ ``complete()`` callback and is used to ensure that this callback only runs
|
|
+ once.
|
|
+
|
|
+* ``SSH_REQUEST_SF_QUEUED_BIT``: This bit is set when the request is queued on
|
|
+ the request queue and cleared when it is dequeued.
|
|
+
|
|
+* ``SSH_REQUEST_SF_PENDING_BIT``: This bit is set when the request is added to
|
|
+ the pending set and cleared when it is removed from it.
|
|
+
|
|
+Request Queue
|
|
+-------------
|
|
+
|
|
+The request queue is the first of the two fundamental collections in the
|
|
+request transport layer. In contrast to the packet queue of the packet
|
|
+transport layer, it is not a priority queue and the simple first come first
|
|
+serve principle applies.
|
|
+
|
|
+All requests to be transmitted by the request transport layer must be
|
|
+submitted to this queue via |ssh_rtl_submit|. Once submitted, requests may
|
|
+not be re-submitted, and will not be re-submitted automatically on timeout.
|
|
+Instead, the request is completed with a timeout error. If desired, the
|
|
+caller can create and submit a new request for another try, but it must not
|
|
+submit the same request again.
|
|
+
|
|
+Pending Set
|
|
+-----------
|
|
+
|
|
+The pending set is the second of the two fundamental collections in the
|
|
+request transport layer. This collection stores references to all pending
|
|
+requests, i.e. requests awaiting a response from the EC (similar to what the
|
|
+pending set of the packet transport layer does for packets).
|
|
+
|
|
+Transmitter Task
|
|
+----------------
|
|
+
|
|
+The transmitter task is scheduled when a new request is available for
|
|
+transmission. It checks if the next request on the request queue can be
|
|
+transmitted and, if so, submits its underlying packet to the packet
|
|
+transport layer. This check ensures that only a limited number of
|
|
+requests can be pending, i.e. waiting for a response, at the same time. If
|
|
+the request requires a response, the request is added to the pending set
|
|
+before its packet is submitted.
|
|
+
|
|
+Packet Completion Callback
|
|
+--------------------------
|
|
+
|
|
+The packet completion callback is executed once the underlying packet of a
|
|
+request has been completed. In case of an error completion, the
|
|
+corresponding request is completed with the error value provided in this
|
|
+callback.
|
|
+
|
|
+On successful packet completion, further processing depends on the request.
|
|
+If the request expects a response, it is marked as transmitted and the
|
|
+request timeout is started. If the request does not expect a response, it is
|
|
+completed with success.
|
|
+
|
|
+Data-Received Callback
|
|
+----------------------
|
|
+
|
|
+The data received callback notifies the request transport layer of data
|
|
+being received by the underlying packet transport layer via a data-type
|
|
+frame. In general, this is expected to be a command-type payload.
|
|
+
|
|
+If the request ID of the command is one of the request IDs reserved for
|
|
+events (one to ``SSH_NUM_EVENTS``, inclusively), it is forwarded to the
|
|
+event callback registered in the request transport layer. If the request ID
|
|
+indicates a response to a request, the respective request is looked up in
|
|
+the pending set and, if found and marked as transmitted, completed with
|
|
+success.
|
|
+
|
|
+Timeout Reaper
|
|
+--------------
|
|
+
|
|
+The request-response-timeout is a per-request timeout for requests expecting
|
|
+a response. It is used to ensure that a request does not wait indefinitely
|
|
+on a response from the EC and is started after the underlying packet has
|
|
+been successfully completed.
|
|
+
|
|
+This timeout is, similar to the packet acknowledgment timeout on the packet
|
|
+transport layer, handled via a dedicated reaper task. This task is
|
|
+essentially a work-item (re-)scheduled to run when the next request is set
|
|
+to time out. The work item then scans the set of pending requests for any
|
|
+requests that have timed out and completes them with ``-ETIMEDOUT`` as
|
|
+status. Requests will not be re-submitted automatically. Instead, the issuer
|
|
+of the request must construct and submit a new request, if so desired.
|
|
+
|
|
+Note that this timeout, in combination with packet transmission and
|
|
+acknowledgment timeouts, guarantees that the request layer will always make
|
|
+progress, even if only through timing out packets, and never fully block.
|
|
+
|
|
+Concurrency and Locking
|
|
+-----------------------
|
|
+
|
|
+Similar to the packet transport layer, there are two main locks in the
|
|
+request transport layer: One guarding access to the request queue and one
|
|
+guarding access to the pending set. These collections may only be accessed
|
|
+and modified under the respective lock.
|
|
+
|
|
+Other parts of the request transport layer are guarded independently. State
|
|
+flags are (again) managed by atomic bit operations and, if necessary, memory
|
|
+barriers. Modifications to the timeout reaper work item and expiration date
|
|
+are guarded by their own lock.
|
|
+
|
|
+Some request fields may be read outside of the respective locks guarding
|
|
+them, specifically the state for tracing. In those cases, proper access is
|
|
+ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``. Such read-only
|
|
+access is only allowed when stale values are not critical.
|
|
+
|
|
+With respect to the interface for higher layers, request submission
|
|
+(|ssh_rtl_submit|), request cancellation (|ssh_rtl_cancel|), and layer
|
|
+shutdown (|ssh_rtl_shutdown|) may always be executed concurrently with
|
|
+respect to each other. Note that request submission may not run concurrently
|
|
+with itself for the same request (and also may only be called once per
|
|
+request). Equally, shutdown may also not run concurrently with itself.
|
|
+
|
|
+
|
|
+Controller Layer
|
|
+================
|
|
+
|
|
+The controller layer extends on the request transport layer to provide an
|
|
+easy-to-use interface for client drivers. It is represented by
|
|
+|ssam_controller| and the SSH driver. While the lower level transport layers
|
|
+take care of transmitting and handling packets and requests, the controller
|
|
+layer takes on more of a management role. Specifically, it handles device
|
|
+initialization, power management, and event handling, including event
|
|
+delivery and registration via the (event) completion system (|ssam_cplt|).
|
|
+
|
|
+Event Registration
|
|
+------------------
|
|
+
|
|
+In general, an event (or rather a class of events) has to be explicitly
|
|
+requested by the host before the EC will send it (HID input events seem to
|
|
+be the exception). This is done via an event-enable request (similarly,
|
|
+events should be disabled via an event-disable request once no longer
|
|
+desired).
|
|
+
|
|
+The specific request used to enable (or disable) an event is given via an
|
|
+event registry, i.e. the governing authority of this event (so to speak),
|
|
+represented by |ssam_event_registry|. As parameters to this request, the
|
|
+target category and, depending on the event registry, instance ID of the
|
|
+event to be enabled must be provided. This (optional) instance ID must be
|
|
+zero if the registry does not use it. Together, target category and instance
|
|
+ID form the event ID, represented by |ssam_event_id|. In short, both, event
|
|
+registry and event ID, are required to uniquely identify a respective class
|
|
+of events.
|
|
+
|
|
+Note that a further *request ID* parameter must be provided for the
|
|
+enable-event request. This parameter does not influence the class of events
|
|
+being enabled, but instead is set as the request ID (RQID) on each event of
|
|
+this class sent by the EC. It is used to identify events (as a limited
|
|
+number of request IDs is reserved for use in events only, specifically one
|
|
+to ``SSH_NUM_EVENTS`` inclusively) and also map events to their specific
|
|
+class. Currently, the controller always sets this parameter to the target
|
|
+category specified in |ssam_event_id|.
|
|
+
|
|
+As multiple client drivers may rely on the same (or overlapping) classes of
|
|
+events and enable/disable calls are strictly binary (i.e. on/off), the
|
|
+controller has to manage access to these events. It does so via reference
|
|
+counting, storing the counter inside an RB-tree based mapping with event
|
|
+registry and ID as key (there is no known list of valid event registry and
|
|
+event ID combinations). See |ssam_nf|, |ssam_nf_refcount_inc|, and
|
|
+|ssam_nf_refcount_dec| for details.
|
|
+
|
|
+This management is done together with notifier registration (described in
|
|
+the next section) via the top-level |ssam_notifier_register| and
|
|
+|ssam_notifier_unregister| functions.
|
|
+
|
|
+Event Delivery
|
|
+--------------
|
|
+
|
|
+To receive events, a client driver has to register an event notifier via
|
|
+|ssam_notifier_register|. This increments the reference counter for that
|
|
+specific class of events (as detailed in the previous section), enables the
|
|
+class on the EC (if it has not been enabled already), and installs the
|
|
+provided notifier callback.
|
|
+
|
|
+Notifier callbacks are stored in lists, with one (RCU) list per target
|
|
+category (provided via the event ID; NB: there is a fixed known number of
|
|
+target categories). There is no known association from the combination of
|
|
+event registry and event ID to the command data (target ID, target category,
|
|
+command ID, and instance ID) that can be provided by an event class, apart
|
|
+from target category and instance ID given via the event ID.
|
|
+
|
|
+Note that due to the way notifiers are (or rather have to be) stored, client
|
|
+drivers may receive events that they have not requested and need to account
|
|
+for them. Specifically, they will, by default, receive all events from the
|
|
+same target category. To simplify dealing with this, filtering of events by
|
|
+target ID (provided via the event registry) and instance ID (provided via
|
|
+the event ID) can be requested when registering a notifier. This filtering
|
|
+is applied when iterating over the notifiers at the time they are executed.
|
|
+
|
|
+All notifier callbacks are executed on a dedicated workqueue, the so-called
|
|
+completion workqueue. After an event has been received via the callback
|
|
+installed in the request layer (running on the receiver thread of the packet
|
|
+transport layer), it will be put on its respective event queue
|
|
+(|ssam_event_queue|). From this event queue the completion work item of that
|
|
+queue (running on the completion workqueue) will pick up the event and
|
|
+execute the notifier callback. This is done to avoid blocking on the
|
|
+receiver thread.
|
|
+
|
|
+There is one event queue per combination of target ID and target category.
|
|
+This is done to ensure that notifier callbacks are executed in sequence for
|
|
+events of the same target ID and target category. Callbacks can be executed
|
|
+in parallel for events with a different combination of target ID and target
|
|
+category.
|
|
+
|
|
+Concurrency and Locking
|
|
+-----------------------
|
|
+
|
|
+Most of the concurrency related safety guarantees of the controller are
|
|
+provided by the lower-level request transport layer. In addition to this,
|
|
+event (un-)registration is guarded by its own lock.
|
|
+
|
|
+Access to the controller state is guarded by the state lock. This lock is a
|
|
+read/write semaphore. The reader part can be used to ensure that the state
|
|
+does not change while functions depending on the state to stay the same
|
|
+(e.g. |ssam_notifier_register|, |ssam_notifier_unregister|,
|
|
+|ssam_request_sync_submit|, and derivatives) are executed and this guarantee
|
|
+is not already provided otherwise (e.g. through |ssam_client_bind| or
|
|
+|ssam_client_link|). The writer part guards any transitions that will change
|
|
+the state, i.e. initialization, destruction, suspension, and resumption.
|
|
+
|
|
+The controller state may be accessed (read-only) outside the state lock for
|
|
+smoke-testing against invalid API usage (e.g. in |ssam_request_sync_submit|).
|
|
+Note that such checks are not supposed to (and will not) protect against all
|
|
+invalid usages, but rather aim to help catch them. In those cases, proper
|
|
+variable access is ensured by employing ``WRITE_ONCE()`` and ``READ_ONCE()``.
|
|
+
|
|
+Assuming any preconditions on the state not changing have been satisfied,
|
|
+all non-initialization and non-shutdown functions may run concurrently with
|
|
+each other. This includes |ssam_notifier_register|, |ssam_notifier_unregister|,
|
|
+|ssam_request_sync_submit|, as well as all functions building on top of those.
|
|
diff --git a/Documentation/driver-api/surface_aggregator/overview.rst b/Documentation/driver-api/surface_aggregator/overview.rst
|
|
new file mode 100644
|
|
index 000000000000..1e9d57e50063
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/overview.rst
|
|
@@ -0,0 +1,77 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+========
|
|
+Overview
|
|
+========
|
|
+
|
|
+The Surface/System Aggregator Module (SAM, SSAM) is an (arguably *the*)
|
|
+embedded controller (EC) on Microsoft Surface devices. It has been originally
|
|
+introduced on 4th generation devices (Surface Pro 4, Surface Book 1), but
|
|
+its responsibilities and feature-set have since been expanded significantly
|
|
+with the following generations.
|
|
+
|
|
+
|
|
+Features and Integration
|
|
+========================
|
|
+
|
|
+Not much is currently known about SAM on 4th generation devices (Surface Pro
|
|
+4, Surface Book 1), due to the use of a different communication interface
|
|
+between host and EC (as detailed below). On 5th (Surface Pro 2017, Surface
|
|
+Book 2, Surface Laptop 1) and later generation devices, SAM is responsible
|
|
+for providing battery information (both current status and static values,
|
|
+such as maximum capacity etc.), as well as an assortment of temperature
|
|
+sensors (e.g. skin temperature) and cooling/performance-mode setting to the
|
|
+host. On the Surface Book 2, specifically, it additionally provides an
|
|
+interface for properly handling clipboard detachment (i.e. separating the
|
|
+display part from the keyboard part of the device), on the Surface Laptop 1
|
|
+and 2 it is required for keyboard HID input. This HID subsystem has been
|
|
+restructured for 7th generation devices and on those, specifically Surface
|
|
+Laptop 3 and Surface Book 3, is responsible for all major HID input (i.e.
|
|
+keyboard and touchpad).
|
|
+
|
|
+While features have not changed much on a coarse level since the 5th
|
|
+generation, internal interfaces have undergone some rather large changes. On
|
|
+5th and 6th generation devices, both battery and temperature information is
|
|
+exposed to ACPI via a shim driver (referred to as Surface ACPI Notify, or
|
|
+SAN), translating ACPI generic serial bus write-/read-accesses to SAM
|
|
+requests. On 7th generation devices, this additional layer is gone and these
|
|
+devices require a driver hooking directly into the SAM interface. Equally,
|
|
+on newer generations, fewer devices are declared in ACPI, making them a bit
|
|
+harder to discover and requiring us to hard-code a sort of device registry.
|
|
+Due to this, a SSAM bus and subsystem with client devices
|
|
+(:c:type:`struct ssam_device <ssam_device>`) has been implemented.
|
|
+
|
|
+
|
|
+Communication
|
|
+=============
|
|
+
|
|
+The type of communication interface between host and EC depends on the
|
|
+generation of the Surface device. On 4th generation devices, host and EC
|
|
+communicate via HID, specifically using a HID-over-I2C device, whereas on
|
|
+5th and later generations, communication takes place via a USART serial
|
|
+device. In accordance with the drivers found on other operating systems, we
|
|
+refer to the serial device and its driver as Surface Serial Hub (SSH). When
|
|
+needed, we differentiate between both types of SAM by referring to them as
|
|
+SAM-over-SSH and SAM-over-HID.
|
|
+
|
|
+Currently, this subsystem only supports SAM-over-SSH. The SSH communication
|
|
+interface is described in more detail below. The HID interface has not been
|
|
+reverse engineered yet and it is, at the moment, unclear how many (and
|
|
+which) concepts of the SSH interface detailed below can be transferred to
|
|
+it.
|
|
+
|
|
+Surface Serial Hub
|
|
+------------------
|
|
+
|
|
+As already elaborated above, the Surface Serial Hub (SSH) is the
|
|
+communication interface for SAM on 5th- and all later-generation Surface
|
|
+devices. On the highest level, communication can be separated into two main
|
|
+types: Requests, messages sent from host to EC that may trigger a direct
|
|
+response from the EC (explicitly associated with the request), and events
|
|
+(sometimes also referred to as notifications), sent from EC to host without
|
|
+being a direct response to a previous request. We may also refer to requests
|
|
+without response as commands. In general, events need to be enabled via one
|
|
+of multiple dedicated requests before they are sent by the EC.
|
|
+
|
|
+See :doc:`ssh` for a more technical protocol documentation and
|
|
+:doc:`internal` for an overview of the internal driver architecture.
|
|
diff --git a/Documentation/driver-api/surface_aggregator/ssh.rst b/Documentation/driver-api/surface_aggregator/ssh.rst
|
|
new file mode 100644
|
|
index 000000000000..bf007d6c9873
|
|
--- /dev/null
|
|
+++ b/Documentation/driver-api/surface_aggregator/ssh.rst
|
|
@@ -0,0 +1,344 @@
|
|
+.. SPDX-License-Identifier: GPL-2.0+
|
|
+
|
|
+.. |u8| replace:: :c:type:`u8 <u8>`
|
|
+.. |u16| replace:: :c:type:`u16 <u16>`
|
|
+.. |TYPE| replace:: ``TYPE``
|
|
+.. |LEN| replace:: ``LEN``
|
|
+.. |SEQ| replace:: ``SEQ``
|
|
+.. |SYN| replace:: ``SYN``
|
|
+.. |NAK| replace:: ``NAK``
|
|
+.. |ACK| replace:: ``ACK``
|
|
+.. |DATA| replace:: ``DATA``
|
|
+.. |DATA_SEQ| replace:: ``DATA_SEQ``
|
|
+.. |DATA_NSQ| replace:: ``DATA_NSQ``
|
|
+.. |TC| replace:: ``TC``
|
|
+.. |TID| replace:: ``TID``
|
|
+.. |IID| replace:: ``IID``
|
|
+.. |RQID| replace:: ``RQID``
|
|
+.. |CID| replace:: ``CID``
|
|
+
|
|
+===========================
|
|
+Surface Serial Hub Protocol
|
|
+===========================
|
|
+
|
|
+The Surface Serial Hub (SSH) is the central communication interface for the
|
|
+embedded Surface Aggregator Module controller (SAM or EC), found on newer
|
|
+Surface generations. We will refer to this protocol and interface as
|
|
+SAM-over-SSH, as opposed to SAM-over-HID for the older generations.
|
|
+
|
|
+On Surface devices with SAM-over-SSH, SAM is connected to the host via UART
|
|
+and defined in ACPI as device with ID ``MSHW0084``. On these devices,
|
|
+significant functionality is provided via SAM, including access to battery
|
|
+and power information and events, thermal read-outs and events, and many
|
|
+more. For Surface Laptops, keyboard input is handled via HID directed
|
|
+through SAM, on the Surface Laptop 3 and Surface Book 3 this also includes
|
|
+touchpad input.
|
|
+
|
|
+Note that the standard disclaimer for this subsystem also applies to this
|
|
+document: All of this has been reverse-engineered and may thus be erroneous
|
|
+and/or incomplete.
|
|
+
|
|
+All CRCs used in the following are two-byte ``crc_ccitt_false(0xffff, ...)``.
|
|
+All multi-byte values are little-endian, there is no implicit padding between
|
|
+values.
|
|
+
|
|
+
|
|
+SSH Packet Protocol: Definitions
|
|
+================================
|
|
+
|
|
+The fundamental communication unit of the SSH protocol is a frame
|
|
+(:c:type:`struct ssh_frame <ssh_frame>`). A frame consists of the following
|
|
+fields, packed together and in order:
|
|
+
|
|
+.. flat-table:: SSH Frame
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - |TYPE|
|
|
+ - |u8|
|
|
+ - Type identifier of the frame.
|
|
+
|
|
+ * - |LEN|
|
|
+ - |u16|
|
|
+ - Length of the payload associated with the frame.
|
|
+
|
|
+ * - |SEQ|
|
|
+ - |u8|
|
|
+ - Sequence ID (see explanation below).
|
|
+
|
|
+Each frame structure is followed by a CRC over this structure. The CRC over
|
|
+the frame structure (|TYPE|, |LEN|, and |SEQ| fields) is placed directly
|
|
+after the frame structure and before the payload. The payload is followed by
|
|
+its own CRC (over all payload bytes). If the payload is not present (i.e.
|
|
+the frame has ``LEN=0``), the CRC of the payload is still present and will
|
|
+evaluate to ``0xffff``. The |LEN| field does not include any of the CRCs, it
|
|
+equals the number of bytes in between the CRC of the frame and the CRC of the
|
|
+payload.
|
|
+
|
|
+Additionally, the following fixed two-byte sequences are used:
|
|
+
|
|
+.. flat-table:: SSH Byte Sequences
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Value
|
|
+ - Description
|
|
+
|
|
+ * - |SYN|
|
|
+ - ``[0xAA, 0x55]``
|
|
+ - Synchronization bytes.
|
|
+
|
|
+A message consists of |SYN|, followed by the frame (|TYPE|, |LEN|, |SEQ| and
|
|
+CRC) and, if specified in the frame (i.e. ``LEN > 0``), payload bytes,
|
|
+followed finally, regardless of whether the payload is present, by the payload CRC. The
|
|
+messages corresponding to an exchange are, in part, identified by having the
|
|
+same sequence ID (|SEQ|), stored inside the frame (more on this in the next
|
|
+section). The sequence ID is a wrapping counter.
|
|
+
|
|
+A frame can have the following types
|
|
+(:c:type:`enum ssh_frame_type <ssh_frame_type>`):
|
|
+
|
|
+.. flat-table:: SSH Frame Types
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Name
|
|
+ - Value
|
|
+ - Short Description
|
|
+
|
|
+ * - |NAK|
|
|
+ - ``0x04``
|
|
+ - Sent on error in previously received message.
|
|
+
|
|
+ * - |ACK|
|
|
+ - ``0x40``
|
|
+ - Sent to acknowledge receival of |DATA| frame.
|
|
+
|
|
+ * - |DATA_SEQ|
|
|
+ - ``0x80``
|
|
+ - Sent to transfer data. Sequenced.
|
|
+
|
|
+ * - |DATA_NSQ|
|
|
+ - ``0x00``
|
|
+ - Same as |DATA_SEQ|, but does not need to be ACKed.
|
|
+
|
|
+Both |NAK|- and |ACK|-type frames are used to control flow of messages and
|
|
+thus do not carry a payload. |DATA_SEQ|- and |DATA_NSQ|-type frames on the
|
|
+other hand must carry a payload. The flow sequence and interaction of
|
|
+different frame types will be described in more depth in the next section.
|
|
+
|
|
+
|
|
+SSH Packet Protocol: Flow Sequence
|
|
+==================================
|
|
+
|
|
+Each exchange begins with |SYN|, followed by a |DATA_SEQ|- or
|
|
+|DATA_NSQ|-type frame, followed by its CRC, payload, and payload CRC. In
|
|
+case of a |DATA_NSQ|-type frame, the exchange is then finished. In case of a
|
|
+|DATA_SEQ|-type frame, the receiving party has to acknowledge receival of
|
|
+the frame by responding with a message containing an |ACK|-type frame with
|
|
+the same sequence ID of the |DATA| frame. In other words, the sequence ID of
|
|
+the |ACK| frame specifies the |DATA| frame to be acknowledged. In case of an
|
|
+error, e.g. an invalid CRC, the receiving party responds with a message
|
|
+containing an |NAK|-type frame. As the sequence ID of the previous data
|
|
+frame, for which an error is indicated via the |NAK| frame, cannot be relied
|
|
+upon, the sequence ID of the |NAK| frame should not be used and is set to
|
|
+zero. After receival of an |NAK| frame, the sending party should re-send all
|
|
+outstanding (non-ACKed) messages.
|
|
+
|
|
+Sequence IDs are not synchronized between the two parties, meaning that they
|
|
+are managed independently for each party. Identifying the messages
|
|
+corresponding to a single exchange thus relies on the sequence ID as well as
|
|
+the type of the message, and the context. Specifically, the sequence ID is
|
|
+used to associate an ``ACK`` with its ``DATA_SEQ``-type frame, but not
|
|
+``DATA_SEQ``- or ``DATA_NSQ``-type frames with other ``DATA``- type frames.
|
|
+
|
|
+An example exchange might look like this:
|
|
+
|
|
+::
|
|
+
|
|
+ tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
|
|
+ rx: ------------------------------------- SYN FRAME(A) CRC(F) CRC(P) --
|
|
+
|
|
+where both frames have the same sequence ID (``SEQ``). Here, ``FRAME(D)``
|
|
+indicates a |DATA_SEQ|-type frame, ``FRAME(A)`` an ``ACK``-type frame,
|
|
+``CRC(F)`` the CRC over the previous frame, ``CRC(P)`` the CRC over the
|
|
+previous payload. In case of an error, the exchange would look like this:
|
|
+
|
|
+::
|
|
+
|
|
+ tx: -- SYN FRAME(D) CRC(F) PAYLOAD CRC(P) -----------------------------
|
|
+ rx: ------------------------------------- SYN FRAME(N) CRC(F) CRC(P) --
|
|
+
|
|
+upon which the sender should re-send the message. ``FRAME(N)`` indicates an
|
|
+|NAK|-type frame. Note that the sequence ID of the |NAK|-type frame is fixed
|
|
+to zero. For |DATA_NSQ|-type frames, both exchanges are the same:
|
|
+
|
|
+::
|
|
+
|
|
+ tx: -- SYN FRAME(DATA_NSQ) CRC(F) PAYLOAD CRC(P) ----------------------
|
|
+ rx: -------------------------------------------------------------------
|
|
+
|
|
+Here, an error can be detected, but not corrected or indicated to the
|
|
+sending party. These exchanges are symmetric, i.e. switching ``rx`` and
|
|
+``tx`` results again in a valid exchange. Currently, no longer exchanges are
|
|
+known.
|
|
+
|
|
+
|
|
+Commands: Requests, Responses, and Events
|
|
+=========================================
|
|
+
|
|
+Commands are sent as payload inside a data frame. Currently, this is the
|
|
+only known payload type of |DATA| frames, with a payload-type value of
|
|
+``0x80`` (:c:type:`SSH_PLD_TYPE_CMD <ssh_payload_type>`).
|
|
+
|
|
+The command-type payload (:c:type:`struct ssh_command <ssh_command>`)
|
|
+consists of an eight-byte command structure, followed by optional and
|
|
+variable length command data. The length of this optional data is derived
|
|
+from the frame payload length given in the corresponding frame, i.e. it is
|
|
+``frame.len - sizeof(struct ssh_command)``. The command struct contains the
|
|
+following fields, packed together and in order:
|
|
+
|
|
+.. flat-table:: SSH Command
|
|
+ :widths: 1 1 4
|
|
+ :header-rows: 1
|
|
+
|
|
+ * - Field
|
|
+ - Type
|
|
+ - Description
|
|
+
|
|
+ * - |TYPE|
|
|
+ - |u8|
|
|
+ - Type of the payload. For commands always ``0x80``.
|
|
+
|
|
+ * - |TC|
|
|
+ - |u8|
|
|
+ - Target category.
|
|
+
|
|
+ * - |TID| (out)
|
|
+ - |u8|
|
|
+ - Target ID for outgoing (host to EC) commands.
|
|
+
|
|
+ * - |TID| (in)
|
|
+ - |u8|
|
|
+ - Target ID for incoming (EC to host) commands.
|
|
+
|
|
+ * - |IID|
|
|
+ - |u8|
|
|
+ - Instance ID.
|
|
+
|
|
+ * - |RQID|
|
|
+ - |u16|
|
|
+ - Request ID.
|
|
+
|
|
+ * - |CID|
|
|
+ - |u8|
|
|
+ - Command ID.
|
|
+
|
|
+The command struct and data, in general, does not contain any failure
|
|
+detection mechanism (e.g. CRCs), this is solely done on the frame level.
|
|
+
|
|
+Command-type payloads are used by the host to send commands and requests to
|
|
+the EC as well as by the EC to send responses and events back to the host.
|
|
+We differentiate between requests (sent by the host), responses (sent by the
|
|
+EC in response to a request), and events (sent by the EC without a preceding
|
|
+request).
|
|
+
|
|
+Commands and events are uniquely identified by their target category
|
|
+(``TC``) and command ID (``CID``). The target category specifies a general
|
|
+category for the command (e.g. system in general, vs. battery and AC, vs.
|
|
+temperature, and so on), while the command ID specifies the command inside
|
|
+that category. Only the combination of |TC| + |CID| is unique. Additionally,
|
|
+commands have an instance ID (``IID``), which is used to differentiate
|
|
+between different sub-devices. For example, ``TC=3`` ``CID=1`` is a
|
|
+request to get the temperature on a thermal sensor, where |IID| specifies
|
|
+the respective sensor. If the instance ID is not used, it should be set to
|
|
+zero. If instance IDs are used, they, in general, start with a value of one,
|
|
+whereas zero may be used for instance independent queries, if applicable. A
|
|
+response to a request should have the same target category, command ID, and
|
|
+instance ID as the corresponding request.
|
|
+
|
|
+Responses are matched to their corresponding request via the request ID
|
|
+(``RQID``) field. This is a 16 bit wrapping counter similar to the sequence
|
|
+ID on the frames. Note that the sequence ID of the frames for a
|
|
+request-response pair does not match. Only the request ID has to match.
|
|
+Frame-protocol wise these are two separate exchanges, and may even be
|
|
+separated, e.g. by an event being sent after the request but before the
|
|
+response. Not all commands produce a response, and this is not detectable by
|
|
+|TC| + |CID|. It is the responsibility of the issuing party to wait for a
|
|
+response (or signal this to the communication framework, as is done in
|
|
+SAN/ACPI via the ``SNC`` flag).
|
|
+
|
|
+Events are identified by unique and reserved request IDs. These IDs should
|
|
+not be used by the host when sending a new request. They are used on the
|
|
+host to, first, detect events and, second, match them with a registered
|
|
+event handler. Request IDs for events are chosen by the host and directed to
|
|
+the EC when setting up and enabling an event source (via the
|
|
+enable-event-source request). The EC then uses the specified request ID for
|
|
+events sent from the respective source. Note that an event should still be
|
|
+identified by its target category, command ID, and, if applicable, instance
|
|
+ID, as a single event source can send multiple different event types. In
|
|
+general, however, a single target category should map to a single reserved
|
|
+event request ID.
|
|
+
|
|
+Furthermore, requests, responses, and events have an associated target ID
|
|
+(``TID``). This target ID is split into output (host to EC) and input (EC to
|
|
+host) fields, with the respecting other field (e.g. output field on incoming
|
|
+messages) set to zero. Two ``TID`` values are known: Primary (``0x01``) and
|
|
+secondary (``0x02``). In general, the response to a request should have the
|
|
+same ``TID`` value, however, the field (output vs. input) should be used in
|
|
+accordance to the direction in which the response is sent (i.e. on the input
|
|
+field, as responses are generally sent from the EC to the host).
|
|
+
|
|
+Note that, even though requests and events should be uniquely identifiable
|
|
+by target category and command ID alone, the EC may require specific
|
|
+target ID and instance ID values to accept a command. A command that is
|
|
+accepted for ``TID=1``, for example, may not be accepted for ``TID=2``
|
|
+and vice versa.
|
|
+
|
|
+
|
|
+Limitations and Observations
|
|
+============================
|
|
+
|
|
+The protocol can, in theory, handle up to ``U8_MAX`` frames in parallel,
|
|
+with up to ``U16_MAX`` pending requests (neglecting request IDs reserved for
|
|
+events). In practice, however, this is more limited. From our testing
|
|
+(although via a python and thus a user-space program), it seems that the EC
|
|
+can handle up to four requests (mostly) reliably in parallel at a certain
|
|
+time. With five or more requests in parallel, consistent discarding of
|
|
+commands (ACKed frame but no command response) has been observed. For five
|
|
+simultaneous commands, this reproducibly resulted in one command being
|
|
+dropped and four commands being handled.
|
|
+
|
|
+However, it has also been noted that, even with three requests in parallel,
|
|
+occasional frame drops happen. Apart from this, with a limit of three
|
|
+pending requests, no dropped commands (i.e. command being dropped but frame
|
|
+carrying command being ACKed) have been observed. In any case, frames (and
|
|
+possibly also commands) should be re-sent by the host if a certain timeout
|
|
+is exceeded. This is done by the EC for frames with a timeout of one second,
|
|
+up to two re-tries (i.e. three transmissions in total). The limit of
|
|
+re-tries also applies to received NAKs, and, in a worst case scenario, can
|
|
+lead to entire messages being dropped.
|
|
+
|
|
+While this also seems to work fine for pending data frames as long as no
|
|
+transmission failures occur, implementation and handling of these seems to
|
|
+depend on the assumption that there is only one non-acknowledged data frame.
|
|
+In particular, the detection of repeated frames relies on the last sequence
|
|
+number. This means that, if a frame that has been successfully received by
|
|
+the EC is sent again, e.g. due to the host not receiving an |ACK|, the EC
|
|
+will only detect this if it has the sequence ID of the last frame received
|
|
+by the EC. As an example: Sending two frames with ``SEQ=0`` and ``SEQ=1``
|
|
+followed by a repetition of ``SEQ=0`` will not detect the second ``SEQ=0``
|
|
+frame as such, and thus execute the command in this frame each time it has
|
|
+been received, i.e. twice in this example. Sending ``SEQ=0``, ``SEQ=1`` and
|
|
+then repeating ``SEQ=1`` will detect the second ``SEQ=1`` as repetition of
|
|
+the first one and ignore it, thus executing the contained command only once.
|
|
+
|
|
+In conclusion, this suggests a limit of at most one pending un-ACKed frame
|
|
+(per party, effectively leading to synchronous communication regarding
|
|
+frames) and at most three pending commands. The limit to synchronous frame
|
|
+transfers seems to be consistent with behavior observed on Windows.
|
|
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
|
|
index 61e1953ff921..3610c379b939 100644
|
|
--- a/drivers/hid/Kconfig
|
|
+++ b/drivers/hid/Kconfig
|
|
@@ -1089,4 +1089,6 @@ source "drivers/hid/i2c-hid/Kconfig"
|
|
|
|
source "drivers/hid/intel-ish-hid/Kconfig"
|
|
|
|
+source "drivers/hid/surface-hid/Kconfig"
|
|
+
|
|
endmenu
|
|
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
|
|
index bd7ac53b75c5..1f23e09049e1 100644
|
|
--- a/drivers/hid/Makefile
|
|
+++ b/drivers/hid/Makefile
|
|
@@ -128,3 +128,5 @@ obj-$(CONFIG_USB_KBD) += usbhid/
|
|
obj-$(CONFIG_I2C_HID) += i2c-hid/
|
|
|
|
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
|
|
+
|
|
+obj-$(CONFIG_SURFACE_HID_CORE) += surface-hid/
|
|
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
|
|
new file mode 100644
|
|
index 000000000000..7ce9b5d641eb
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/Kconfig
|
|
@@ -0,0 +1,42 @@
|
|
+# SPDX-License-Identifier: GPL-2.0+
|
|
+menu "Surface System Aggregator Module HID support"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ depends on INPUT
|
|
+
|
|
+config SURFACE_HID
|
|
+ tristate "HID transport driver for Surface System Aggregator Module"
|
|
+ depends on SURFACE_AGGREGATOR_REGISTRY
|
|
+ select SURFACE_HID_CORE
|
|
+ help
|
|
+ Driver to support integrated HID devices on newer Microsoft Surface
|
|
+ models.
|
|
+
|
|
+ This driver provides support for the HID transport protocol provided
|
|
+ by the Surface Aggregator Module (i.e. the embedded controller) on
|
|
+ 7th-generation Microsoft Surface devices, i.e. Surface Book 3 and
|
|
+ Surface Laptop 3. On those models, it is mainly used to connect the
|
|
+ integrated touchpad and keyboard.
|
|
+
|
|
+ Say M or Y here, if you want support for integrated HID devices, i.e.
|
|
+ integrated touchpad and keyboard, on 7th generation Microsoft Surface
|
|
+ models.
|
|
+
|
|
+config SURFACE_KBD
|
|
+ tristate "HID keyboard transport driver for Surface System Aggregator Module"
|
|
+ select SURFACE_HID_CORE
|
|
+ help
|
|
+ Driver to support HID keyboards on Surface Laptop 1 and 2 devices.
|
|
+
|
|
+ This driver provides support for the HID transport protocol provided
|
|
+ by the Surface Aggregator Module (i.e. the embedded controller) on
|
|
+ Microsoft Surface Laptops 1 and 2. It is used to connect the
|
|
+ integrated keyboard on those devices.
|
|
+
|
|
+ Say M or Y here, if you want support for the integrated keyboard on
|
|
+ Microsoft Surface Laptops 1 and 2.
|
|
+
|
|
+endmenu
|
|
+
|
|
+config SURFACE_HID_CORE
|
|
+ tristate
|
|
+ select HID
|
|
diff --git a/drivers/hid/surface-hid/Makefile b/drivers/hid/surface-hid/Makefile
|
|
new file mode 100644
|
|
index 000000000000..4ae11cf09b25
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/Makefile
|
|
@@ -0,0 +1,7 @@
|
|
+# SPDX-License-Identifier: GPL-2.0+
|
|
+#
|
|
+# Makefile - Surface System Aggregator Module (SSAM) HID transport driver.
|
|
+#
|
|
+obj-$(CONFIG_SURFACE_HID_CORE) += surface_hid_core.o
|
|
+obj-$(CONFIG_SURFACE_HID) += surface_hid.o
|
|
+obj-$(CONFIG_SURFACE_KBD) += surface_kbd.o
|
|
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
|
|
new file mode 100644
|
|
index 000000000000..82767dd3e088
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/surface_hid.c
|
|
@@ -0,0 +1,251 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) HID transport driver for the
|
|
+ * generic HID interface (HID/TC=0x15 subsystem). Provides support for
|
|
+ * integrated HID devices on Surface Laptop 3, Book 3, and later.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Blaž Hrastnik <blaz@mxxn.io>,
|
|
+ * Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/hid.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+#include "surface_hid_core.h"
|
|
+
|
|
+
|
|
+/* -- SAM interface. -------------------------------------------------------- */
|
|
+
|
|
+struct surface_hid_buffer_slice {
|
|
+ __u8 entry;
|
|
+ __le32 offset;
|
|
+ __le32 length;
|
|
+ __u8 end;
|
|
+ __u8 data[];
|
|
+} __packed;
|
|
+
|
|
+enum surface_hid_cid {
|
|
+ SURFACE_HID_CID_OUTPUT_REPORT = 0x01,
|
|
+ SURFACE_HID_CID_GET_FEATURE_REPORT = 0x02,
|
|
+ SURFACE_HID_CID_SET_FEATURE_REPORT = 0x03,
|
|
+ SURFACE_HID_CID_GET_DESCRIPTOR = 0x04,
|
|
+};
|
|
+
|
|
+static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
|
|
+{
|
|
+ u8 buffer[sizeof(struct surface_hid_buffer_slice) + 0x76];
|
|
+ struct surface_hid_buffer_slice *slice;
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response rsp;
|
|
+ u32 buffer_len, offset, length;
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Note: The 0x76 above has been chosen because that's what's used by
|
|
+ * the Windows driver. Together with the header, this leads to a 128
|
|
+ * byte payload in total.
|
|
+ */
|
|
+
|
|
+ buffer_len = ARRAY_SIZE(buffer) - sizeof(struct surface_hid_buffer_slice);
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.command_id = SURFACE_HID_CID_GET_DESCRIPTOR;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(struct surface_hid_buffer_slice);
|
|
+ rqst.payload = buffer;
|
|
+
|
|
+ rsp.capacity = ARRAY_SIZE(buffer);
|
|
+ rsp.pointer = buffer;
|
|
+
|
|
+ slice = (struct surface_hid_buffer_slice *)buffer;
|
|
+ slice->entry = entry;
|
|
+ slice->end = 0;
|
|
+
|
|
+ offset = 0;
|
|
+ length = buffer_len;
|
|
+
|
|
+ while (!slice->end && offset < len) {
|
|
+ put_unaligned_le32(offset, &slice->offset);
|
|
+ put_unaligned_le32(length, &slice->length);
|
|
+
|
|
+ rsp.length = 0;
|
|
+
|
|
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
|
|
+ sizeof(*slice));
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ offset = get_unaligned_le32(&slice->offset);
|
|
+ length = get_unaligned_le32(&slice->length);
|
|
+
|
|
+ /* Don't mess stuff up in case we receive garbage. */
|
|
+ if (length > buffer_len || offset > len)
|
|
+ return -EPROTO;
|
|
+
|
|
+ if (offset + length > len)
|
|
+ length = len - offset;
|
|
+
|
|
+ memcpy(buf + offset, &slice->data[0], length);
|
|
+
|
|
+ offset += length;
|
|
+ length = buffer_len;
|
|
+ }
|
|
+
|
|
+ if (offset != len) {
|
|
+ dev_err(shid->dev, "unexpected descriptor length: got %u, expected %zu\n",
|
|
+ offset, len);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_hid_set_raw_report(struct surface_hid_device *shid, u8 rprt_id, bool feature,
|
|
+ u8 *buf, size_t len)
|
|
+{
|
|
+ struct ssam_request rqst;
|
|
+ u8 cid;
|
|
+
|
|
+ if (feature)
|
|
+ cid = SURFACE_HID_CID_SET_FEATURE_REPORT;
|
|
+ else
|
|
+ cid = SURFACE_HID_CID_OUTPUT_REPORT;
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.command_id = cid;
|
|
+ rqst.flags = 0;
|
|
+ rqst.length = len;
|
|
+ rqst.payload = buf;
|
|
+
|
|
+ buf[0] = rprt_id;
|
|
+
|
|
+ return ssam_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
|
|
+}
|
|
+
|
|
+static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response rsp;
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.command_id = SURFACE_HID_CID_GET_FEATURE_REPORT;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(rprt_id);
|
|
+ rqst.payload = &rprt_id;
|
|
+
|
|
+ rsp.capacity = len;
|
|
+ rsp.length = 0;
|
|
+ rsp.pointer = buf;
|
|
+
|
|
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
|
|
+}
|
|
+
|
|
+static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
|
|
+{
|
|
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
|
|
+
|
|
+ if (event->command_id != 0x00)
|
|
+ return 0;
|
|
+
|
|
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
|
|
+ return SSAM_NOTIF_HANDLED;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Transport driver. ----------------------------------------------------- */
|
|
+
|
|
+static int shid_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssam_hid_set_raw_report(shid, rprt_id, false, buf, len);
|
|
+ return status >= 0 ? len : status;
|
|
+}
|
|
+
|
|
+static int shid_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssam_hid_get_raw_report(shid, rprt_id, buf, len);
|
|
+ return status >= 0 ? len : status;
|
|
+}
|
|
+
|
|
+static int shid_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssam_hid_set_raw_report(shid, rprt_id, true, buf, len);
|
|
+ return status >= 0 ? len : status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Driver setup. --------------------------------------------------------- */
|
|
+
|
|
+static int surface_hid_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ struct surface_hid_device *shid;
|
|
+
|
|
+ shid = devm_kzalloc(&sdev->dev, sizeof(*shid), GFP_KERNEL);
|
|
+ if (!shid)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ shid->dev = &sdev->dev;
|
|
+ shid->ctrl = sdev->ctrl;
|
|
+ shid->uid = sdev->uid;
|
|
+
|
|
+ shid->notif.base.priority = 1;
|
|
+ shid->notif.base.fn = ssam_hid_event_fn;
|
|
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
|
|
+ shid->notif.event.id.target_category = sdev->uid.category;
|
|
+ shid->notif.event.id.instance = sdev->uid.instance;
|
|
+ shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
|
|
+ shid->notif.event.flags = 0;
|
|
+
|
|
+ shid->ops.get_descriptor = ssam_hid_get_descriptor;
|
|
+ shid->ops.output_report = shid_output_report;
|
|
+ shid->ops.get_feature_report = shid_get_feature_report;
|
|
+ shid->ops.set_feature_report = shid_set_feature_report;
|
|
+
|
|
+ ssam_device_set_drvdata(sdev, shid);
|
|
+ return surface_hid_device_add(shid);
|
|
+}
|
|
+
|
|
+static void surface_hid_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ surface_hid_device_destroy(ssam_device_get_drvdata(sdev));
|
|
+}
|
|
+
|
|
+static const struct ssam_device_id surface_hid_match[] = {
|
|
+ { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(ssam, surface_hid_match);
|
|
+
|
|
+static struct ssam_device_driver surface_hid_driver = {
|
|
+ .probe = surface_hid_probe,
|
|
+ .remove = surface_hid_remove,
|
|
+ .match_table = surface_hid_match,
|
|
+ .driver = {
|
|
+ .name = "surface_hid",
|
|
+ .pm = &surface_hid_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_ssam_device_driver(surface_hid_driver);
|
|
+
|
|
+MODULE_AUTHOR("Blaž Hrastnik <blaz@mxxn.io>");
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("HID transport driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
|
|
new file mode 100644
|
|
index 000000000000..5571e74abe91
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/surface_hid_core.c
|
|
@@ -0,0 +1,272 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
|
|
+ * transport driver. Provides support for integrated HID devices on Microsoft
|
|
+ * Surface models.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/hid.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/usb/ch9.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#include "surface_hid_core.h"
|
|
+
|
|
+
|
|
+/* -- Device descriptor access. --------------------------------------------- */
|
|
+
|
|
+static int surface_hid_load_hid_descriptor(struct surface_hid_device *shid)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_HID,
|
|
+ (u8 *)&shid->hid_desc, sizeof(shid->hid_desc));
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (shid->hid_desc.desc_len != sizeof(shid->hid_desc)) {
|
|
+ dev_err(shid->dev, "unexpected HID descriptor length: got %u, expected %zu\n",
|
|
+ shid->hid_desc.desc_len, sizeof(shid->hid_desc));
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ if (shid->hid_desc.desc_type != HID_DT_HID) {
|
|
+ dev_err(shid->dev, "unexpected HID descriptor type: got %#04x, expected %#04x\n",
|
|
+ shid->hid_desc.desc_type, HID_DT_HID);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ if (shid->hid_desc.num_descriptors != 1) {
|
|
+ dev_err(shid->dev, "unexpected number of descriptors: got %u, expected 1\n",
|
|
+ shid->hid_desc.num_descriptors);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ if (shid->hid_desc.report_desc_type != HID_DT_REPORT) {
|
|
+ dev_err(shid->dev, "unexpected report descriptor type: got %#04x, expected %#04x\n",
|
|
+ shid->hid_desc.report_desc_type, HID_DT_REPORT);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_hid_load_device_attributes(struct surface_hid_device *shid)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_ATTRS,
|
|
+ (u8 *)&shid->attrs, sizeof(shid->attrs));
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (get_unaligned_le32(&shid->attrs.length) != sizeof(shid->attrs)) {
|
|
+ dev_err(shid->dev, "unexpected attribute length: got %u, expected %zu\n",
|
|
+ get_unaligned_le32(&shid->attrs.length), sizeof(shid->attrs));
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Transport driver (common). -------------------------------------------- */
|
|
+
|
|
+static int surface_hid_start(struct hid_device *hid)
|
|
+{
|
|
+ struct surface_hid_device *shid = hid->driver_data;
|
|
+
|
|
+ return ssam_notifier_register(shid->ctrl, &shid->notif);
|
|
+}
|
|
+
|
|
+static void surface_hid_stop(struct hid_device *hid)
|
|
+{
|
|
+ struct surface_hid_device *shid = hid->driver_data;
|
|
+
|
|
+ /* Note: This call will log errors for us, so ignore them here. */
|
|
+ ssam_notifier_unregister(shid->ctrl, &shid->notif);
|
|
+}
|
|
+
|
|
+static int surface_hid_open(struct hid_device *hid)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void surface_hid_close(struct hid_device *hid)
|
|
+{
|
|
+}
|
|
+
|
|
+static int surface_hid_parse(struct hid_device *hid)
|
|
+{
|
|
+ struct surface_hid_device *shid = hid->driver_data;
|
|
+ size_t len = get_unaligned_le16(&shid->hid_desc.report_desc_len);
|
|
+ u8 *buf;
|
|
+ int status;
|
|
+
|
|
+ buf = kzalloc(len, GFP_KERNEL);
|
|
+ if (!buf)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ status = shid->ops.get_descriptor(shid, SURFACE_HID_DESC_REPORT, buf, len);
|
|
+ if (!status)
|
|
+ status = hid_parse_report(hid, buf, len);
|
|
+
|
|
+ kfree(buf);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportnum, u8 *buf,
|
|
+ size_t len, unsigned char rtype, int reqtype)
|
|
+{
|
|
+ struct surface_hid_device *shid = hid->driver_data;
|
|
+
|
|
+ if (rtype == HID_OUTPUT_REPORT && reqtype == HID_REQ_SET_REPORT)
|
|
+ return shid->ops.output_report(shid, reportnum, buf, len);
|
|
+
|
|
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_GET_REPORT)
|
|
+ return shid->ops.get_feature_report(shid, reportnum, buf, len);
|
|
+
|
|
+ else if (rtype == HID_FEATURE_REPORT && reqtype == HID_REQ_SET_REPORT)
|
|
+ return shid->ops.set_feature_report(shid, reportnum, buf, len);
|
|
+
|
|
+ return -EIO;
|
|
+}
|
|
+
|
|
+static struct hid_ll_driver surface_hid_ll_driver = {
|
|
+ .start = surface_hid_start,
|
|
+ .stop = surface_hid_stop,
|
|
+ .open = surface_hid_open,
|
|
+ .close = surface_hid_close,
|
|
+ .parse = surface_hid_parse,
|
|
+ .raw_request = surface_hid_raw_request,
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Common device setup. -------------------------------------------------- */
|
|
+
|
|
+int surface_hid_device_add(struct surface_hid_device *shid)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = surface_hid_load_hid_descriptor(shid);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = surface_hid_load_device_attributes(shid);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ shid->hid = hid_allocate_device();
|
|
+ if (IS_ERR(shid->hid))
|
|
+ return PTR_ERR(shid->hid);
|
|
+
|
|
+ shid->hid->dev.parent = shid->dev;
|
|
+ shid->hid->bus = BUS_HOST;
|
|
+ shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
|
|
+ shid->hid->product = get_unaligned_le16(&shid->attrs.product);
|
|
+ shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
|
|
+ shid->hid->country = shid->hid_desc.country_code;
|
|
+
|
|
+ snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
|
|
+ shid->hid->vendor, shid->hid->product);
|
|
+
|
|
+ strscpy(shid->hid->phys, dev_name(shid->dev), sizeof(shid->hid->phys));
|
|
+
|
|
+ shid->hid->driver_data = shid;
|
|
+ shid->hid->ll_driver = &surface_hid_ll_driver;
|
|
+
|
|
+ status = hid_add_device(shid->hid);
|
|
+ if (status)
|
|
+ hid_destroy_device(shid->hid);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(surface_hid_device_add);
|
|
+
|
|
+void surface_hid_device_destroy(struct surface_hid_device *shid)
|
|
+{
|
|
+ hid_destroy_device(shid->hid);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(surface_hid_device_destroy);
|
|
+
|
|
+
|
|
+/* -- PM ops. --------------------------------------------------------------- */
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+
|
|
+static int surface_hid_suspend(struct device *dev)
|
|
+{
|
|
+ struct surface_hid_device *d = dev_get_drvdata(dev);
|
|
+
|
|
+ if (d->hid->driver && d->hid->driver->suspend)
|
|
+ return d->hid->driver->suspend(d->hid, PMSG_SUSPEND);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_hid_resume(struct device *dev)
|
|
+{
|
|
+ struct surface_hid_device *d = dev_get_drvdata(dev);
|
|
+
|
|
+ if (d->hid->driver && d->hid->driver->resume)
|
|
+ return d->hid->driver->resume(d->hid);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_hid_freeze(struct device *dev)
|
|
+{
|
|
+ struct surface_hid_device *d = dev_get_drvdata(dev);
|
|
+
|
|
+ if (d->hid->driver && d->hid->driver->suspend)
|
|
+ return d->hid->driver->suspend(d->hid, PMSG_FREEZE);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_hid_poweroff(struct device *dev)
|
|
+{
|
|
+ struct surface_hid_device *d = dev_get_drvdata(dev);
|
|
+
|
|
+ if (d->hid->driver && d->hid->driver->suspend)
|
|
+ return d->hid->driver->suspend(d->hid, PMSG_HIBERNATE);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_hid_restore(struct device *dev)
|
|
+{
|
|
+ struct surface_hid_device *d = dev_get_drvdata(dev);
|
|
+
|
|
+ if (d->hid->driver && d->hid->driver->reset_resume)
|
|
+ return d->hid->driver->reset_resume(d->hid);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const struct dev_pm_ops surface_hid_pm_ops = {
|
|
+ .freeze = surface_hid_freeze,
|
|
+ .thaw = surface_hid_resume,
|
|
+ .suspend = surface_hid_suspend,
|
|
+ .resume = surface_hid_resume,
|
|
+ .poweroff = surface_hid_poweroff,
|
|
+ .restore = surface_hid_restore,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
|
|
+
|
|
+#else /* CONFIG_PM_SLEEP */
|
|
+
|
|
+const struct dev_pm_ops surface_hid_pm_ops = { };
|
|
+EXPORT_SYMBOL_GPL(surface_hid_pm_ops);
|
|
+
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("HID transport driver core for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/hid/surface-hid/surface_hid_core.h b/drivers/hid/surface-hid/surface_hid_core.h
|
|
new file mode 100644
|
|
index 000000000000..56fb9e8c5466
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/surface_hid_core.h
|
|
@@ -0,0 +1,73 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Common/core components for the Surface System Aggregator Module (SSAM) HID
|
|
+ * transport driver. Provides support for integrated HID devices on Microsoft
|
|
+ * Surface models.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef SURFACE_HID_CORE_H
|
|
+#define SURFACE_HID_CORE_H
|
|
+
|
|
+#include <linux/hid.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+enum surface_hid_descriptor_entry {
|
|
+ SURFACE_HID_DESC_HID = 0,
|
|
+ SURFACE_HID_DESC_REPORT = 1,
|
|
+ SURFACE_HID_DESC_ATTRS = 2,
|
|
+};
|
|
+
|
|
+struct surface_hid_descriptor {
|
|
+ __u8 desc_len; /* = 9 */
|
|
+ __u8 desc_type; /* = HID_DT_HID */
|
|
+ __le16 hid_version;
|
|
+ __u8 country_code;
|
|
+ __u8 num_descriptors; /* = 1 */
|
|
+
|
|
+ __u8 report_desc_type; /* = HID_DT_REPORT */
|
|
+ __le16 report_desc_len;
|
|
+} __packed;
|
|
+
|
|
+struct surface_hid_attributes {
|
|
+ __le32 length;
|
|
+ __le16 vendor;
|
|
+ __le16 product;
|
|
+ __le16 version;
|
|
+ __u8 _unknown[22];
|
|
+} __packed;
|
|
+
|
|
+struct surface_hid_device;
|
|
+
|
|
+struct surface_hid_device_ops {
|
|
+ int (*get_descriptor)(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len);
|
|
+ int (*output_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
|
|
+ int (*get_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
|
|
+ int (*set_feature_report)(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len);
|
|
+};
|
|
+
|
|
+struct surface_hid_device {
|
|
+ struct device *dev;
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct ssam_device_uid uid;
|
|
+
|
|
+ struct surface_hid_descriptor hid_desc;
|
|
+ struct surface_hid_attributes attrs;
|
|
+
|
|
+ struct ssam_event_notifier notif;
|
|
+ struct hid_device *hid;
|
|
+
|
|
+ struct surface_hid_device_ops ops;
|
|
+};
|
|
+
|
|
+int surface_hid_device_add(struct surface_hid_device *shid);
|
|
+void surface_hid_device_destroy(struct surface_hid_device *shid);
|
|
+
|
|
+extern const struct dev_pm_ops surface_hid_pm_ops;
|
|
+
|
|
+#endif /* SURFACE_HID_CORE_H */
|
|
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
|
|
new file mode 100644
|
|
index 000000000000..0635341bc517
|
|
--- /dev/null
|
|
+++ b/drivers/hid/surface-hid/surface_kbd.c
|
|
@@ -0,0 +1,300 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) HID transport driver for the legacy
|
|
+ * keyboard interface (KBD/TC=0x08 subsystem). Provides support for the
|
|
+ * integrated HID keyboard on Surface Laptops 1 and 2.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/hid.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#include "surface_hid_core.h"
|
|
+
|
|
+
|
|
+/* -- SAM interface (KBD). -------------------------------------------------- */
|
|
+
|
|
+#define KBD_FEATURE_REPORT_SIZE 7 /* 6 + report ID */
|
|
+
|
|
+enum surface_kbd_cid {
|
|
+ SURFACE_KBD_CID_GET_DESCRIPTOR = 0x00,
|
|
+ SURFACE_KBD_CID_SET_CAPSLOCK_LED = 0x01,
|
|
+ SURFACE_KBD_CID_EVT_INPUT_GENERIC = 0x03,
|
|
+ SURFACE_KBD_CID_EVT_INPUT_HOTKEYS = 0x04,
|
|
+ SURFACE_KBD_CID_GET_FEATURE_REPORT = 0x0b,
|
|
+};
|
|
+
|
|
+static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry, u8 *buf, size_t len)
|
|
+{
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response rsp;
|
|
+ int status;
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.command_id = SURFACE_KBD_CID_GET_DESCRIPTOR;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(entry);
|
|
+ rqst.payload = &entry;
|
|
+
|
|
+ rsp.capacity = len;
|
|
+ rsp.length = 0;
|
|
+ rsp.pointer = buf;
|
|
+
|
|
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (rsp.length != len) {
|
|
+ dev_err(shid->dev, "invalid descriptor length: got %zu, expected, %zu\n",
|
|
+ rsp.length, len);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
|
|
+{
|
|
+ struct ssam_request rqst;
|
|
+ u8 value_u8 = value;
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.command_id = SURFACE_KBD_CID_SET_CAPSLOCK_LED;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.flags = 0;
|
|
+ rqst.length = sizeof(value_u8);
|
|
+ rqst.payload = &value_u8;
|
|
+
|
|
+ return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
|
|
+}
|
|
+
|
|
+static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf, size_t len)
|
|
+{
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response rsp;
|
|
+ u8 payload = 0;
|
|
+ int status;
|
|
+
|
|
+ rqst.target_category = shid->uid.category;
|
|
+ rqst.target_id = shid->uid.target;
|
|
+ rqst.command_id = SURFACE_KBD_CID_GET_FEATURE_REPORT;
|
|
+ rqst.instance_id = shid->uid.instance;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(payload);
|
|
+ rqst.payload = &payload;
|
|
+
|
|
+ rsp.capacity = len;
|
|
+ rsp.length = 0;
|
|
+ rsp.pointer = buf;
|
|
+
|
|
+ status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (rsp.length != len) {
|
|
+ dev_err(shid->dev, "invalid feature report length: got %zu, expected, %zu\n",
|
|
+ rsp.length, len);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static bool ssam_kbd_is_input_event(const struct ssam_event *event)
|
|
+{
|
|
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_GENERIC)
|
|
+ return true;
|
|
+
|
|
+ if (event->command_id == SURFACE_KBD_CID_EVT_INPUT_HOTKEYS)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static u32 ssam_kbd_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
|
|
+{
|
|
+ struct surface_hid_device *shid = container_of(nf, struct surface_hid_device, notif);
|
|
+
|
|
+ /*
|
|
+ * Check against device UID manually, as registry and device target
|
|
+ * category doesn't line up.
|
|
+ */
|
|
+
|
|
+ if (shid->uid.category != event->target_category)
|
|
+ return 0;
|
|
+
|
|
+ if (shid->uid.target != event->target_id)
|
|
+ return 0;
|
|
+
|
|
+ if (shid->uid.instance != event->instance_id)
|
|
+ return 0;
|
|
+
|
|
+ if (!ssam_kbd_is_input_event(event))
|
|
+ return 0;
|
|
+
|
|
+ hid_input_report(shid->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0);
|
|
+ return SSAM_NOTIF_HANDLED;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Transport driver (KBD). ----------------------------------------------- */
|
|
+
|
|
+static int skbd_get_caps_led_value(struct hid_device *hid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ struct hid_field *field;
|
|
+ unsigned int offset, size;
|
|
+ int i;
|
|
+
|
|
+ /* Get LED field. */
|
|
+ field = hidinput_get_led_field(hid);
|
|
+ if (!field)
|
|
+ return -ENOENT;
|
|
+
|
|
+ /* Check if we got the correct report. */
|
|
+ if (len != hid_report_len(field->report))
|
|
+ return -ENOENT;
|
|
+
|
|
+ if (rprt_id != field->report->id)
|
|
+ return -ENOENT;
|
|
+
|
|
+ /* Get caps lock LED index. */
|
|
+ for (i = 0; i < field->report_count; i++)
|
|
+ if ((field->usage[i].hid & 0xffff) == 0x02)
|
|
+ break;
|
|
+
|
|
+ if (i == field->report_count)
|
|
+ return -ENOENT;
|
|
+
|
|
+ /* Extract value. */
|
|
+ size = field->report_size;
|
|
+ offset = field->report_offset + i * size;
|
|
+ return !!hid_field_extract(hid, buf + 1, size, offset);
|
|
+}
|
|
+
|
|
+static int skbd_output_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ int caps_led;
|
|
+ int status;
|
|
+
|
|
+ caps_led = skbd_get_caps_led_value(shid->hid, rprt_id, buf, len);
|
|
+ if (caps_led < 0)
|
|
+ return -EIO; /* Only caps LED output reports are supported. */
|
|
+
|
|
+ status = ssam_kbd_set_caps_led(shid, caps_led);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static int skbd_get_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ u8 report[KBD_FEATURE_REPORT_SIZE];
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * The keyboard only has a single hard-coded read-only feature report
|
|
+ * of size KBD_FEATURE_REPORT_SIZE. Try to load it and compare its
|
|
+ * report ID against the requested one.
|
|
+ */
|
|
+
|
|
+ if (len < ARRAY_SIZE(report))
|
|
+ return -ENOSPC;
|
|
+
|
|
+ status = ssam_kbd_get_feature_report(shid, report, ARRAY_SIZE(report));
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ if (rprt_id != report[0])
|
|
+ return -ENOENT;
|
|
+
|
|
+ memcpy(buf, report, ARRAY_SIZE(report));
|
|
+ return len;
|
|
+}
|
|
+
|
|
+static int skbd_set_feature_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
|
|
+{
|
|
+ /* Not supported. See skbd_get_feature_report() for details. */
|
|
+ return -EIO;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Driver setup. --------------------------------------------------------- */
|
|
+
|
|
+static int surface_kbd_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct surface_hid_device *shid;
|
|
+
|
|
+ /* Add device link to EC. */
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ shid = devm_kzalloc(&pdev->dev, sizeof(*shid), GFP_KERNEL);
|
|
+ if (!shid)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ shid->dev = &pdev->dev;
|
|
+ shid->ctrl = ctrl;
|
|
+
|
|
+ shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
|
|
+ shid->uid.category = SSAM_SSH_TC_KBD;
|
|
+ shid->uid.target = 2;
|
|
+ shid->uid.instance = 0;
|
|
+ shid->uid.function = 0;
|
|
+
|
|
+ shid->notif.base.priority = 1;
|
|
+ shid->notif.base.fn = ssam_kbd_event_fn;
|
|
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
|
|
+ shid->notif.event.id.target_category = shid->uid.category;
|
|
+ shid->notif.event.id.instance = shid->uid.instance;
|
|
+ shid->notif.event.mask = SSAM_EVENT_MASK_NONE;
|
|
+ shid->notif.event.flags = 0;
|
|
+
|
|
+ shid->ops.get_descriptor = ssam_kbd_get_descriptor;
|
|
+ shid->ops.output_report = skbd_output_report;
|
|
+ shid->ops.get_feature_report = skbd_get_feature_report;
|
|
+ shid->ops.set_feature_report = skbd_set_feature_report;
|
|
+
|
|
+ platform_set_drvdata(pdev, shid);
|
|
+ return surface_hid_device_add(shid);
|
|
+}
|
|
+
|
|
+static int surface_kbd_remove(struct platform_device *pdev)
|
|
+{
|
|
+ surface_hid_device_destroy(platform_get_drvdata(pdev));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id surface_kbd_match[] = {
|
|
+ { "MSHW0096" },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, surface_kbd_match);
|
|
+
|
|
+static struct platform_driver surface_kbd_driver = {
|
|
+ .probe = surface_kbd_probe,
|
|
+ .remove = surface_kbd_remove,
|
|
+ .driver = {
|
|
+ .name = "surface_keyboard",
|
|
+ .acpi_match_table = surface_kbd_match,
|
|
+ .pm = &surface_hid_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_platform_driver(surface_kbd_driver);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("HID legacy transport driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
|
|
index d1d9ebaecf1c..9aed8f6941cc 100644
|
|
--- a/drivers/platform/x86/Kconfig
|
|
+++ b/drivers/platform/x86/Kconfig
|
|
@@ -1185,6 +1185,106 @@ config SURFACE_BOOK1_DGPU_SWITCH
|
|
This driver provides a sysfs switch to set the power-state of the
|
|
discrete GPU found on the Microsoft Surface Book 1.
|
|
|
|
+config SURFACE_ACPI_NOTIFY
|
|
+ tristate "Surface ACPI Notify Driver"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ help
|
|
+ Surface ACPI Notify (SAN) driver for Microsoft Surface devices.
|
|
+
|
|
+ This driver provides support for the ACPI interface (called SAN) of
|
|
+ the Surface System Aggregator Module (SSAM) EC. This interface is used
|
|
+ on 5th- and 6th-generation Microsoft Surface devices (including
|
|
+ Surface Pro 5 and 6, Surface Book 2, Surface Laptops 1 and 2, and in
|
|
+ reduced functionality on the Surface Laptop 3) to execute SSAM
|
|
+ requests directly from ACPI code, as well as receive SSAM events and
|
|
+ turn them into ACPI notifications. It essentially acts as a
|
|
+ translation layer between the SSAM controller and ACPI.
|
|
+
|
|
+ Specifically, this driver may be needed for battery status reporting,
|
|
+ thermal sensor access, and real-time clock information, depending on
|
|
+ the Surface device in question.
|
|
+
|
|
+config SURFACE_AGGREGATOR_CDEV
|
|
+ tristate "Surface System Aggregator Module User-Space Interface"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ help
|
|
+ Provides a misc-device interface to the Surface System Aggregator
|
|
+ Module (SSAM) controller.
|
|
+
|
|
+ This option provides a module (called surface_aggregator_cdev), that,
|
|
+ when loaded, will add a client device (and its respective driver) to
|
|
+ the SSAM controller. Said client device manages a misc-device
|
|
+ interface (/dev/surface/aggregator), which can be used by user-space
|
|
+ tools to directly communicate with the SSAM EC by sending requests and
|
|
+ receiving the corresponding responses.
|
|
+
|
|
+ The provided interface is intended for debugging and development only,
|
|
+ and should not be used otherwise.
|
|
+
|
|
+config SURFACE_AGGREGATOR_REGISTRY
|
|
+ tristate "Surface System Aggregator Module Device Registry"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ depends on SURFACE_AGGREGATOR_BUS
|
|
+ help
|
|
+ Device-registry and device-hubs for Surface System Aggregator Module
|
|
+ (SSAM) devices.
|
|
+
|
|
+ Provides a module and driver which act as a device-registry for SSAM
|
|
+ client devices that cannot be detected automatically, e.g. via ACPI.
|
|
+ Such devices are instead provided via this registry and attached via
|
|
+ device hubs, also provided in this module.
|
|
+
|
|
+ Devices provided via this registry are:
|
|
+ - Platform profile (performance-/cooling-mode) device (5th- and later
|
|
+ generations).
|
|
+ - Battery/AC devices (7th-generation).
|
|
+ - HID input devices (7th-generation).
|
|
+
|
|
+ Select M (recommended) or Y here if you want support for the above
|
|
+ mentioned devices on the corresponding Surface models. Without this
|
|
+ module, the respective devices will not be instantiated and thus any
|
|
+ functionality provided by them will be missing, even when drivers for
|
|
+ these devices are present. In other words, this module only provides
|
|
+ the respective client devices. Drivers for these devices still need to
|
|
+ be selected via the other options.
|
|
+
|
|
+config SURFACE_DTX
|
|
+ tristate "Surface DTX (Detachment System) Driver"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ depends on INPUT
|
|
+ help
|
|
+ Driver for the Surface Book clipboard detachment system (DTX).
|
|
+
|
|
+ On the Surface Book series devices, the display part containing the
|
|
+ CPU (called the clipboard) can be detached from the base (containing a
|
|
+ battery, the keyboard, and, optionally, a discrete GPU) by (if
|
|
+ necessary) unlocking and opening the latch connecting both parts.
|
|
+
|
|
+ This driver provides a user-space interface that can influence the
|
|
+ behavior of this process, which includes the option to abort it in
|
|
+ case the base is still in use or speed it up in case it is not.
|
|
+
|
|
+ Note that this module can be built without support for the Surface
|
|
+ Aggregator Bus (i.e. CONFIG_SURFACE_AGGREGATOR_BUS=n). In that case,
|
|
+ some devices, specifically the Surface Book 3, will not be supported.
|
|
+
|
|
+config SURFACE_PERFMODE
|
|
+ tristate "Surface Performance-Mode Driver"
|
|
+ depends on SURFACE_AGGREGATOR_BUS
|
|
+ depends on SYSFS
|
|
+ help
|
|
+ Driver for the performance-/cooling-mode interface of Microsoft
|
|
+ Surface devices.
|
|
+
|
|
+ Microsoft Surface devices using the Surface System Aggregator Module
|
|
+ (SSAM) can be switched between different performance modes. This,
|
|
+ depending on the device, can influence their cooling behavior and may
|
|
+ influence power limits, allowing users to choose between performance
|
|
+ and higher power-draw, or lower power-draw and more silent operation.
|
|
+
|
|
+ This driver provides a user-space interface (via sysfs) for
|
|
+ controlling said mode via the corresponding client device.
|
|
+
|
|
config INTEL_PUNIT_IPC
|
|
tristate "Intel P-Unit IPC Driver"
|
|
---help---
|
|
@@ -1268,6 +1368,8 @@ config INTEL_ATOMISP2_PM
|
|
To compile this driver as a module, choose M here: the module
|
|
will be called intel_atomisp2_pm.
|
|
|
|
+source "drivers/platform/x86/surface_aggregator/Kconfig"
|
|
+
|
|
endif # X86_PLATFORM_DEVICES
|
|
|
|
config PMC_ATOM
|
|
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
|
|
index 6b028d1ee802..2737a78616c8 100644
|
|
--- a/drivers/platform/x86/Makefile
|
|
+++ b/drivers/platform/x86/Makefile
|
|
@@ -79,6 +79,12 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
|
|
obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
|
|
obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o
|
|
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
|
|
+obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator/
|
|
+obj-$(CONFIG_SURFACE_AGGREGATOR_CDEV) += surface_aggregator_cdev.o
|
|
+obj-$(CONFIG_SURFACE_AGGREGATOR_REGISTRY) += surface_aggregator_registry.o
|
|
+obj-$(CONFIG_SURFACE_ACPI_NOTIFY) += surface_acpi_notify.o
|
|
+obj-$(CONFIG_SURFACE_DTX) += surface_dtx.o
|
|
+obj-$(CONFIG_SURFACE_PERFMODE) += surface_perfmode.o
|
|
obj-$(CONFIG_SURFACE_PRO3_BUTTON) += surfacepro3_button.o
|
|
obj-$(CONFIG_SURFACE_3_BUTTON) += surface3_button.o
|
|
obj-$(CONFIG_SURFACE_3_POWER_OPREGION) += surface3_power.o
|
|
diff --git a/drivers/platform/x86/surface_acpi_notify.c b/drivers/platform/x86/surface_acpi_notify.c
|
|
new file mode 100644
|
|
index 000000000000..ef9c1f8e8336
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_acpi_notify.c
|
|
@@ -0,0 +1,886 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Driver for the Surface ACPI Notify (SAN) interface/shim.
|
|
+ *
|
|
+ * Translates communication from ACPI to Surface System Aggregator Module
|
|
+ * (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
|
|
+ * events back to ACPI notifications. Allows handling of discrete GPU
|
|
+ * notifications sent from ACPI via the SAN interface by providing them to any
|
|
+ * registered external driver.
|
|
+ *
|
|
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/delay.h>
|
|
+#include <linux/jiffies.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/notifier.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/rwsem.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_acpi_notify.h>
|
|
+
|
|
+struct san_data {
|
|
+ struct device *dev;
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ struct acpi_connection_info info;
|
|
+
|
|
+ struct ssam_event_notifier nf_bat;
|
|
+ struct ssam_event_notifier nf_tmp;
|
|
+};
|
|
+
|
|
+#define to_san_data(ptr, member) \
|
|
+ container_of(ptr, struct san_data, member)
|
|
+
|
|
+
|
|
+/* -- dGPU notifier interface. ---------------------------------------------- */
|
|
+
|
|
+struct san_rqsg_if {
|
|
+ struct rw_semaphore lock;
|
|
+ struct device *dev;
|
|
+ struct blocking_notifier_head nh;
|
|
+};
|
|
+
|
|
+static struct san_rqsg_if san_rqsg_if = {
|
|
+ .lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
|
|
+ .dev = NULL,
|
|
+ .nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
|
|
+};
|
|
+
|
|
+static int san_set_rqsg_interface_device(struct device *dev)
|
|
+{
|
|
+ int status = 0;
|
|
+
|
|
+ down_write(&san_rqsg_if.lock);
|
|
+ if (!san_rqsg_if.dev && dev)
|
|
+ san_rqsg_if.dev = dev;
|
|
+ else
|
|
+ status = -EBUSY;
|
|
+ up_write(&san_rqsg_if.lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * san_client_link() - Link client as consumer to SAN device.
|
|
+ * @client: The client to link.
|
|
+ *
|
|
+ * Sets up a device link between the provided client device as consumer and
|
|
+ * the SAN device as provider. This function can be used to ensure that the
|
|
+ * SAN interface has been set up and will be set up for as long as the driver
|
|
+ * of the client device is bound. This guarantees that, during that time, all
|
|
+ * dGPU events will be received by any registered notifier.
|
|
+ *
|
|
+ * The link will be automatically removed once the client device's driver is
|
|
+ * unbound.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENXIO if the SAN interface has not been
|
|
+ * set up yet, and %-ENOMEM if device link creation failed.
|
|
+ */
|
|
+int san_client_link(struct device *client)
|
|
+{
|
|
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
|
|
+ struct device_link *link;
|
|
+
|
|
+ down_read(&san_rqsg_if.lock);
|
|
+
|
|
+ if (!san_rqsg_if.dev) {
|
|
+ up_read(&san_rqsg_if.lock);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ link = device_link_add(client, san_rqsg_if.dev, flags);
|
|
+ if (!link) {
|
|
+ up_read(&san_rqsg_if.lock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
|
|
+ up_read(&san_rqsg_if.lock);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ up_read(&san_rqsg_if.lock);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(san_client_link);
|
|
+
|
|
+/**
|
|
+ * san_dgpu_notifier_register() - Register a SAN dGPU notifier.
|
|
+ * @nb: The notifier-block to register.
|
|
+ *
|
|
+ * Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
|
|
+ * ACPI. The registered notifier will be called with &struct san_dgpu_event
|
|
+ * as notifier data and the command ID of that event as notifier action.
|
|
+ */
|
|
+int san_dgpu_notifier_register(struct notifier_block *nb)
|
|
+{
|
|
+ return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
|
|
+
|
|
+/**
|
|
+ * san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
|
|
+ * @nb: The notifier-block to unregister.
|
|
+ */
|
|
+int san_dgpu_notifier_unregister(struct notifier_block *nb)
|
|
+{
|
|
+ return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
|
|
+
|
|
+static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
|
|
+ return notifier_to_errno(ret);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- ACPI _DSM event relay. ------------------------------------------------ */
|
|
+
|
|
+#define SAN_DSM_REVISION 0
|
|
+
|
|
+/* 93b666c5-70c6-469f-a215-3d487c91ab3c */
|
|
+static const guid_t SAN_DSM_UUID =
|
|
+ GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
|
|
+ 0x48, 0x7c, 0x91, 0xab, 0x3c);
|
|
+
|
|
+enum san_dsm_event_fn {
|
|
+ SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
|
|
+ SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
|
|
+ SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
|
|
+ SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
|
|
+ SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
|
|
+ SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
|
|
+ SAN_DSM_EVENT_FN_THERMAL = 0x09,
|
|
+ SAN_DSM_EVENT_FN_DPTF = 0x0a,
|
|
+};
|
|
+
|
|
+enum sam_event_cid_bat {
|
|
+ SAM_EVENT_CID_BAT_BIX = 0x15,
|
|
+ SAM_EVENT_CID_BAT_BST = 0x16,
|
|
+ SAM_EVENT_CID_BAT_ADP = 0x17,
|
|
+ SAM_EVENT_CID_BAT_PROT = 0x18,
|
|
+ SAM_EVENT_CID_BAT_DPTF = 0x4f,
|
|
+};
|
|
+
|
|
+enum sam_event_cid_tmp {
|
|
+ SAM_EVENT_CID_TMP_TRIP = 0x0b,
|
|
+};
|
|
+
|
|
+struct san_event_work {
|
|
+ struct delayed_work work;
|
|
+ struct device *dev;
|
|
+ struct ssam_event event; /* must be last */
|
|
+};
|
|
+
|
|
+static int san_acpi_notify_event(struct device *dev, u64 func,
|
|
+ union acpi_object *param)
|
|
+{
|
|
+ acpi_handle san = ACPI_HANDLE(dev);
|
|
+ union acpi_object *obj;
|
|
+ int status = 0;
|
|
+
|
|
+ if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func)))
|
|
+ return 0;
|
|
+
|
|
+ dev_dbg(dev, "notify event %#04llx\n", func);
|
|
+
|
|
+ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
|
|
+ func, param, ACPI_TYPE_BUFFER);
|
|
+ if (!obj)
|
|
+ return -EFAULT;
|
|
+
|
|
+ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
|
|
+ dev_err(dev, "got unexpected result from _DSM\n");
|
|
+ status = -EPROTO;
|
|
+ }
|
|
+
|
|
+ ACPI_FREE(obj);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /*
|
|
+ * Ensure that the battery states get updated correctly. When the
|
|
+ * battery is fully charged and an adapter is plugged in, it sometimes
|
|
+ * is not updated correctly, instead showing it as charging.
|
|
+ * Explicitly trigger battery updates to fix this.
|
|
+ */
|
|
+
|
|
+ status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
|
|
+}
|
|
+
|
|
+static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
|
|
+{
|
|
+ enum san_dsm_event_fn fn;
|
|
+
|
|
+ if (event->instance_id == 0x02)
|
|
+ fn = SAN_DSM_EVENT_FN_BAT2_INFO;
|
|
+ else
|
|
+ fn = SAN_DSM_EVENT_FN_BAT1_INFO;
|
|
+
|
|
+ return san_acpi_notify_event(dev, fn, NULL);
|
|
+}
|
|
+
|
|
+static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
|
|
+{
|
|
+ enum san_dsm_event_fn fn;
|
|
+
|
|
+ if (event->instance_id == 0x02)
|
|
+ fn = SAN_DSM_EVENT_FN_BAT2_STAT;
|
|
+ else
|
|
+ fn = SAN_DSM_EVENT_FN_BAT1_STAT;
|
|
+
|
|
+ return san_acpi_notify_event(dev, fn, NULL);
|
|
+}
|
|
+
|
|
+static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
|
|
+{
|
|
+ union acpi_object payload;
|
|
+
|
|
+ /*
|
|
+ * The Surface ACPI expects a buffer and not a package. It specifically
|
|
+ * checks for ObjectType (Arg3) == 0x03. This will cause a warning in
|
|
+ * acpica/nsarguments.c, but that warning can be safely ignored.
|
|
+ */
|
|
+ payload.type = ACPI_TYPE_BUFFER;
|
|
+ payload.buffer.length = event->length;
|
|
+ payload.buffer.pointer = (u8 *)&event->data[0];
|
|
+
|
|
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
|
|
+}
|
|
+
|
|
+static unsigned long san_evt_bat_delay(u8 cid)
|
|
+{
|
|
+ switch (cid) {
|
|
+ case SAM_EVENT_CID_BAT_ADP:
|
|
+ /*
|
|
+ * Wait for battery state to update before signaling adapter
|
|
+ * change.
|
|
+ */
|
|
+ return msecs_to_jiffies(5000);
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_BST:
|
|
+ /* Ensure we do not miss anything important due to caching. */
|
|
+ return msecs_to_jiffies(2000);
|
|
+
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ switch (event->command_id) {
|
|
+ case SAM_EVENT_CID_BAT_BIX:
|
|
+ status = san_evt_bat_bix(dev, event);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_BST:
|
|
+ status = san_evt_bat_bst(dev, event);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_ADP:
|
|
+ status = san_evt_bat_adp(dev, event);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_PROT:
|
|
+ /*
|
|
+ * TODO: Implement support for battery protection status change
|
|
+ * event.
|
|
+ */
|
|
+ return true;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_DPTF:
|
|
+ status = san_evt_bat_dptf(dev, event);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (status) {
|
|
+ dev_err(dev, "error handling power event (cid = %#04x)\n",
|
|
+ event->command_id);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static void san_evt_bat_workfn(struct work_struct *work)
|
|
+{
|
|
+ struct san_event_work *ev;
|
|
+
|
|
+ ev = container_of(work, struct san_event_work, work.work);
|
|
+ san_evt_bat(&ev->event, ev->dev);
|
|
+ kfree(ev);
|
|
+}
|
|
+
|
|
+static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
|
|
+ const struct ssam_event *event)
|
|
+{
|
|
+ struct san_data *d = to_san_data(nf, nf_bat);
|
|
+ struct san_event_work *work;
|
|
+ unsigned long delay = san_evt_bat_delay(event->command_id);
|
|
+
|
|
+ if (delay == 0)
|
|
+ return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
|
|
+
|
|
+ work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
|
|
+ if (!work)
|
|
+ return ssam_notifier_from_errno(-ENOMEM);
|
|
+
|
|
+ INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
|
|
+ work->dev = d->dev;
|
|
+
|
|
+ memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
|
|
+
|
|
+ schedule_delayed_work(&work->work, delay);
|
|
+ return SSAM_NOTIF_HANDLED;
|
|
+}
|
|
+
|
|
+static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
|
|
+{
|
|
+ union acpi_object param;
|
|
+
|
|
+ /*
|
|
+ * The Surface ACPI expects an integer and not a package. This will
|
|
+ * cause a warning in acpica/nsarguments.c, but that warning can be
|
|
+ * safely ignored.
|
|
+ */
|
|
+ param.type = ACPI_TYPE_INTEGER;
|
|
+ param.integer.value = event->instance_id;
|
|
+
|
|
+ return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, ¶m);
|
|
+}
|
|
+
|
|
+static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ switch (event->command_id) {
|
|
+ case SAM_EVENT_CID_TMP_TRIP:
|
|
+ status = san_evt_tmp_trip(dev, event);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (status) {
|
|
+ dev_err(dev, "error handling thermal event (cid = %#04x)\n",
|
|
+ event->command_id);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
|
|
+ const struct ssam_event *event)
|
|
+{
|
|
+ struct san_data *d = to_san_data(nf, nf_tmp);
|
|
+
|
|
+ return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- ACPI GSB OperationRegion handler -------------------------------------- */
|
|
+
|
|
+struct gsb_data_in {
|
|
+ u8 cv;
|
|
+} __packed;
|
|
+
|
|
+struct gsb_data_rqsx {
|
|
+ u8 cv; /* Command value (san_gsb_request_cv). */
|
|
+ u8 tc; /* Target category. */
|
|
+ u8 tid; /* Target ID. */
|
|
+ u8 iid; /* Instance ID. */
|
|
+ u8 snc; /* Expect-response-flag. */
|
|
+ u8 cid; /* Command ID. */
|
|
+ u16 cdl; /* Payload length. */
|
|
+ u8 pld[]; /* Payload. */
|
|
+} __packed;
|
|
+
|
|
+struct gsb_data_etwl {
|
|
+ u8 cv; /* Command value (should be 0x02). */
|
|
+ u8 etw3; /* Unknown. */
|
|
+ u8 etw4; /* Unknown. */
|
|
+ u8 msg[]; /* Error message (ASCIIZ). */
|
|
+} __packed;
|
|
+
|
|
+struct gsb_data_out {
|
|
+ u8 status; /* _SSH communication status. */
|
|
+ u8 len; /* _SSH payload length. */
|
|
+ u8 pld[]; /* _SSH payload. */
|
|
+} __packed;
|
|
+
|
|
+union gsb_buffer_data {
|
|
+ struct gsb_data_in in; /* Common input. */
|
|
+ struct gsb_data_rqsx rqsx; /* RQSX input. */
|
|
+ struct gsb_data_etwl etwl; /* ETWL input. */
|
|
+ struct gsb_data_out out; /* Output. */
|
|
+};
|
|
+
|
|
+struct gsb_buffer {
|
|
+ u8 status; /* GSB AttribRawProcess status. */
|
|
+ u8 len; /* GSB AttribRawProcess length. */
|
|
+ union gsb_buffer_data data;
|
|
+} __packed;
|
|
+
|
|
+#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
|
|
+#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
|
|
+
|
|
+#define SAN_GSB_COMMAND 0
|
|
+
|
|
+enum san_gsb_request_cv {
|
|
+ SAN_GSB_REQUEST_CV_RQST = 0x01,
|
|
+ SAN_GSB_REQUEST_CV_ETWL = 0x02,
|
|
+ SAN_GSB_REQUEST_CV_RQSG = 0x03,
|
|
+};
|
|
+
|
|
+#define SAN_REQUEST_NUM_TRIES 5
|
|
+
|
|
+static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
|
|
+{
|
|
+ struct gsb_data_etwl *etwl = &b->data.etwl;
|
|
+
|
|
+ if (b->len < sizeof(struct gsb_data_etwl)) {
|
|
+ dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4,
|
|
+ (unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
|
|
+ (char *)etwl->msg);
|
|
+
|
|
+ /* Indicate success. */
|
|
+ b->status = 0x00;
|
|
+ b->len = 0x00;
|
|
+
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static
|
|
+struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type,
|
|
+ struct gsb_buffer *b)
|
|
+{
|
|
+ struct gsb_data_rqsx *rqsx = &b->data.rqsx;
|
|
+
|
|
+ if (b->len < sizeof(struct gsb_data_rqsx)) {
|
|
+ dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
|
|
+ dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
|
|
+ type, b->len, get_unaligned(&rqsx->cdl));
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
|
|
+ dev_err(dev, "payload for %s package too large (cdl = %d)\n",
|
|
+ type, get_unaligned(&rqsx->cdl));
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ return rqsx;
|
|
+}
|
|
+
|
|
+static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
|
|
+{
|
|
+ gsb->status = 0x00;
|
|
+ gsb->len = 0x02;
|
|
+ gsb->data.out.status = (u8)(-status);
|
|
+ gsb->data.out.len = 0x00;
|
|
+}
|
|
+
|
|
+static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
|
|
+{
|
|
+ gsb->status = 0x00;
|
|
+ gsb->len = len + 2;
|
|
+ gsb->data.out.status = 0x00;
|
|
+ gsb->data.out.len = len;
|
|
+
|
|
+ if (len)
|
|
+ memcpy(&gsb->data.out.pld[0], ptr, len);
|
|
+}
|
|
+
|
|
+static acpi_status san_rqst_fixup_suspended(struct san_data *d,
|
|
+ struct ssam_request *rqst,
|
|
+ struct gsb_buffer *gsb)
|
|
+{
|
|
+ if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
|
|
+ u8 base_state = 1;
|
|
+
|
|
+ /* Base state quirk:
|
|
+ * The base state may be queried from ACPI when the EC is still
|
|
+ * suspended. In this case it will return '-EPERM'. This query
|
|
+ * will only be triggered from the ACPI lid GPE interrupt, thus
|
|
+ * we are either in laptop or studio mode (base status 0x01 or
|
|
+ * 0x02). Furthermore, we will only get here if the device (and
|
|
+ * EC) have been suspended.
|
|
+ *
|
|
+ * We now assume that the device is in laptop mode (0x01). This
|
|
+ * has the drawback that it will wake the device when unfolding
|
|
+ * it in studio mode, but it also allows us to avoid actively
|
|
+ * waiting for the EC to wake up, which may incur a notable
|
|
+ * delay.
|
|
+ */
|
|
+
|
|
+ dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
|
|
+
|
|
+ gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ gsb_rqsx_response_error(gsb, -ENXIO);
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
|
|
+{
|
|
+ u8 rspbuf[SAN_GSB_MAX_RESPONSE];
|
|
+ struct gsb_data_rqsx *gsb_rqst;
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response rsp;
|
|
+ int status = 0;
|
|
+
|
|
+ gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
|
|
+ if (!gsb_rqst)
|
|
+ return AE_OK;
|
|
+
|
|
+ rqst.target_category = gsb_rqst->tc;
|
|
+ rqst.target_id = gsb_rqst->tid;
|
|
+ rqst.command_id = gsb_rqst->cid;
|
|
+ rqst.instance_id = gsb_rqst->iid;
|
|
+ rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
|
|
+ rqst.length = get_unaligned(&gsb_rqst->cdl);
|
|
+ rqst.payload = &gsb_rqst->pld[0];
|
|
+
|
|
+ rsp.capacity = ARRAY_SIZE(rspbuf);
|
|
+ rsp.length = 0;
|
|
+ rsp.pointer = &rspbuf[0];
|
|
+
|
|
+ /* Handle suspended device. */
|
|
+ if (d->dev->power.is_suspended) {
|
|
+ dev_warn(d->dev, "rqst: device is suspended, not executing\n");
|
|
+ return san_rqst_fixup_suspended(d, &rqst, buffer);
|
|
+ }
|
|
+
|
|
+ status = __ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
|
|
+ d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
|
|
+
|
|
+ if (!status) {
|
|
+ gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
|
|
+ } else {
|
|
+ dev_err(d->dev, "rqst: failed with error %d\n", status);
|
|
+ gsb_rqsx_response_error(buffer, status);
|
|
+ }
|
|
+
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
|
|
+{
|
|
+ struct gsb_data_rqsx *gsb_rqsg;
|
|
+ struct san_dgpu_event evt;
|
|
+ int status;
|
|
+
|
|
+ gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
|
|
+ if (!gsb_rqsg)
|
|
+ return AE_OK;
|
|
+
|
|
+ evt.category = gsb_rqsg->tc;
|
|
+ evt.target = gsb_rqsg->tid;
|
|
+ evt.command = gsb_rqsg->cid;
|
|
+ evt.instance = gsb_rqsg->iid;
|
|
+ evt.length = get_unaligned(&gsb_rqsg->cdl);
|
|
+ evt.payload = &gsb_rqsg->pld[0];
|
|
+
|
|
+ status = san_dgpu_notifier_call(&evt);
|
|
+ if (!status) {
|
|
+ gsb_rqsx_response_success(buffer, NULL, 0);
|
|
+ } else {
|
|
+ dev_err(d->dev, "rqsg: failed with error %d\n", status);
|
|
+ gsb_rqsx_response_error(buffer, status);
|
|
+ }
|
|
+
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static acpi_status san_opreg_handler(u32 function, acpi_physical_address command,
|
|
+ u32 bits, u64 *value64, void *opreg_context,
|
|
+ void *region_context)
|
|
+{
|
|
+ struct san_data *d = to_san_data(opreg_context, info);
|
|
+ struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
|
|
+ int accessor_type = (function & 0xFFFF0000) >> 16;
|
|
+
|
|
+ if (command != SAN_GSB_COMMAND) {
|
|
+ dev_warn(d->dev, "unsupported command: %#04llx\n", command);
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
|
|
+ dev_err(d->dev, "invalid access type: %#04x\n", accessor_type);
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ /* Buffer must have at least contain the command-value. */
|
|
+ if (buffer->len == 0) {
|
|
+ dev_err(d->dev, "request-package too small\n");
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ switch (buffer->data.in.cv) {
|
|
+ case SAN_GSB_REQUEST_CV_RQST:
|
|
+ return san_rqst(d, buffer);
|
|
+
|
|
+ case SAN_GSB_REQUEST_CV_ETWL:
|
|
+ return san_etwl(d, buffer);
|
|
+
|
|
+ case SAN_GSB_REQUEST_CV_RQSG:
|
|
+ return san_rqsg(d, buffer);
|
|
+
|
|
+ default:
|
|
+ dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n",
|
|
+ buffer->data.in.cv);
|
|
+ return AE_OK;
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Driver setup. --------------------------------------------------------- */
|
|
+
|
|
+static int san_events_register(struct platform_device *pdev)
|
|
+{
|
|
+ struct san_data *d = platform_get_drvdata(pdev);
|
|
+ int status;
|
|
+
|
|
+ d->nf_bat.base.priority = 1;
|
|
+ d->nf_bat.base.fn = san_evt_bat_nf;
|
|
+ d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
|
|
+ d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
|
|
+ d->nf_bat.event.id.instance = 0;
|
|
+ d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
|
|
+ d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ d->nf_tmp.base.priority = 1;
|
|
+ d->nf_tmp.base.fn = san_evt_tmp_nf;
|
|
+ d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
|
|
+ d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
|
|
+ d->nf_tmp.event.id.instance = 0;
|
|
+ d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
|
|
+ d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ status = ssam_notifier_register(d->ctrl, &d->nf_bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
|
|
+ if (status)
|
|
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void san_events_unregister(struct platform_device *pdev)
|
|
+{
|
|
+ struct san_data *d = platform_get_drvdata(pdev);
|
|
+
|
|
+ ssam_notifier_unregister(d->ctrl, &d->nf_bat);
|
|
+ ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
|
|
+}
|
|
+
|
|
+#define san_consumer_printk(level, dev, handle, fmt, ...) \
|
|
+do { \
|
|
+ char *path = "<error getting consumer path>"; \
|
|
+ struct acpi_buffer buffer = { \
|
|
+ .length = ACPI_ALLOCATE_BUFFER, \
|
|
+ .pointer = NULL, \
|
|
+ }; \
|
|
+ \
|
|
+ if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) \
|
|
+ path = buffer.pointer; \
|
|
+ \
|
|
+ dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__); \
|
|
+ kfree(buffer.pointer); \
|
|
+} while (0)
|
|
+
|
|
+#define san_consumer_dbg(dev, handle, fmt, ...) \
|
|
+ san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
|
|
+
|
|
+#define san_consumer_warn(dev, handle, fmt, ...) \
|
|
+ san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
|
|
+
|
|
+static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
|
|
+{
|
|
+ struct acpi_handle_list dep_devices;
|
|
+ acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
|
|
+ acpi_status status;
|
|
+ int i;
|
|
+
|
|
+ if (!acpi_has_method(handle, "_DEP"))
|
|
+ return false;
|
|
+
|
|
+ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
|
|
+ if (ACPI_FAILURE(status)) {
|
|
+ san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < dep_devices.count; i++) {
|
|
+ if (dep_devices.handles[i] == supplier)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
|
|
+ void *context, void **rv)
|
|
+{
|
|
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
|
|
+ struct platform_device *pdev = context;
|
|
+ struct acpi_device *adev;
|
|
+ struct device_link *link;
|
|
+
|
|
+ if (!is_san_consumer(pdev, handle))
|
|
+ return AE_OK;
|
|
+
|
|
+ /* Ignore ACPI devices that are not present. */
|
|
+ if (acpi_bus_get_device(handle, &adev) != 0)
|
|
+ return AE_OK;
|
|
+
|
|
+ san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
|
|
+
|
|
+ /* Try to set up device links, ignore but log errors. */
|
|
+ link = device_link_add(&adev->dev, &pdev->dev, flags);
|
|
+ if (!link) {
|
|
+ san_consumer_warn(&pdev->dev, handle, "failed to create device link\n");
|
|
+ return AE_OK;
|
|
+ }
|
|
+
|
|
+ return AE_OK;
|
|
+}
|
|
+
|
|
+static int san_consumer_links_setup(struct platform_device *pdev)
|
|
+{
|
|
+ acpi_status status;
|
|
+
|
|
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
|
|
+ ACPI_UINT32_MAX, san_consumer_setup, NULL,
|
|
+ pdev, NULL);
|
|
+
|
|
+ return status ? -EFAULT : 0;
|
|
+}
|
|
+
|
|
+static int san_probe(struct platform_device *pdev)
|
|
+{
|
|
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct san_data *data;
|
|
+ acpi_status astatus;
|
|
+ int status;
|
|
+
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ status = san_consumer_links_setup(pdev);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
|
|
+ if (!data)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ data->dev = &pdev->dev;
|
|
+ data->ctrl = ctrl;
|
|
+
|
|
+ platform_set_drvdata(pdev, data);
|
|
+
|
|
+ astatus = acpi_install_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
|
|
+ &san_opreg_handler, NULL,
|
|
+ &data->info);
|
|
+ if (ACPI_FAILURE(astatus))
|
|
+ return -ENXIO;
|
|
+
|
|
+ status = san_events_register(pdev);
|
|
+ if (status)
|
|
+ goto err_enable_events;
|
|
+
|
|
+ status = san_set_rqsg_interface_device(&pdev->dev);
|
|
+ if (status)
|
|
+ goto err_install_dev;
|
|
+
|
|
+ acpi_walk_dep_device_list(san);
|
|
+ return 0;
|
|
+
|
|
+err_install_dev:
|
|
+ san_events_unregister(pdev);
|
|
+err_enable_events:
|
|
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
|
|
+ &san_opreg_handler);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int san_remove(struct platform_device *pdev)
|
|
+{
|
|
+ acpi_handle san = ACPI_HANDLE(&pdev->dev);
|
|
+
|
|
+ san_set_rqsg_interface_device(NULL);
|
|
+ acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
|
|
+ &san_opreg_handler);
|
|
+ san_events_unregister(pdev);
|
|
+
|
|
+ /*
|
|
+ * We have unregistered our event sources. Now we need to ensure that
|
|
+ * all delayed works they may have spawned are run to completion.
|
|
+ */
|
|
+ flush_scheduled_work();
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id san_match[] = {
|
|
+ { "MSHW0091" },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, san_match);
|
|
+
|
|
+static struct platform_driver surface_acpi_notify = {
|
|
+ .probe = san_probe,
|
|
+ .remove = san_remove,
|
|
+ .driver = {
|
|
+ .name = "surface_acpi_notify",
|
|
+ .acpi_match_table = san_match,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_platform_driver(surface_acpi_notify);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/surface_aggregator/Kconfig b/drivers/platform/x86/surface_aggregator/Kconfig
|
|
new file mode 100644
|
|
index 000000000000..cab020324256
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/Kconfig
|
|
@@ -0,0 +1,69 @@
|
|
+# SPDX-License-Identifier: GPL-2.0+
|
|
+# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+
|
|
+menuconfig SURFACE_AGGREGATOR
|
|
+ tristate "Microsoft Surface System Aggregator Module Subsystem and Drivers"
|
|
+ depends on SERIAL_DEV_BUS
|
|
+ depends on ACPI
|
|
+ select CRC_CCITT
|
|
+ help
|
|
+ The Surface System Aggregator Module (Surface SAM or SSAM) is an
|
|
+ embedded controller (EC) found on 5th- and later-generation Microsoft
|
|
+ Surface devices (i.e. Surface Pro 5, Surface Book 2, Surface Laptop,
|
|
+ and newer, with exception of Surface Go series devices).
|
|
+
|
|
+ Depending on the device in question, this EC provides varying
|
|
+ functionality, including:
|
|
+ - EC access from ACPI via Surface ACPI Notify (5th- and 6th-generation)
|
|
+ - battery status information (all devices)
|
|
+ - thermal sensor access (all devices)
|
|
+ - performance mode / cooling mode control (all devices)
|
|
+ - clipboard detachment system control (Surface Book 2 and 3)
|
|
+ - HID / keyboard input (Surface Laptops, Surface Book 3)
|
|
+
|
|
+ This option controls whether the Surface SAM subsystem core will be
|
|
+ built. This includes a driver for the Surface Serial Hub (SSH), which
|
|
+ is the device responsible for the communication with the EC, and a
|
|
+ basic kernel interface exposing the EC functionality to other client
|
|
+ drivers, i.e. allowing them to make requests to the EC and receive
|
|
+ events from it. Selecting this option alone will not provide any
|
|
+ client drivers and therefore no functionality beyond the in-kernel
|
|
+ interface. Said functionality is the responsibility of the respective
|
|
+ client drivers.
|
|
+
|
|
+ Note: While 4th-generation Surface devices also make use of a SAM EC,
|
|
+ due to a difference in the communication interface of the controller,
|
|
+ only 5th and later generations are currently supported. Specifically,
|
|
+ devices using SAM-over-SSH are supported, whereas devices using
|
|
+ SAM-over-HID, which is used on the 4th generation, are currently not
|
|
+ supported.
|
|
+
|
|
+ Choose m if you want to build the SAM subsystem core and SSH driver as
|
|
+ module, y if you want to build it into the kernel and n if you don't
|
|
+ want it at all.
|
|
+
|
|
+config SURFACE_AGGREGATOR_BUS
|
|
+ bool "Surface System Aggregator Module Bus"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ default y
|
|
+ help
|
|
+ Expands the Surface System Aggregator Module (SSAM) core driver by
|
|
+ providing a dedicated bus and client-device type.
|
|
+
|
|
+ This bus and device type are intended to provide and simplify support
|
|
+ for non-platform and non-ACPI SSAM devices, i.e. SSAM devices that are
|
|
+ not auto-detectable via the conventional means (e.g. ACPI).
|
|
+
|
|
+config SURFACE_AGGREGATOR_ERROR_INJECTION
|
|
+ bool "Surface System Aggregator Module Error Injection Capabilities"
|
|
+ depends on SURFACE_AGGREGATOR
|
|
+ depends on FUNCTION_ERROR_INJECTION
|
|
+ help
|
|
+ Provides error-injection capabilities for the Surface System
|
|
+ Aggregator Module subsystem and Surface Serial Hub driver.
|
|
+
|
|
+ Specifically, exports error injection hooks to be used with the
|
|
+ kernel's function error injection capabilities to simulate underlying
|
|
+ transport and communication problems, such as invalid data sent to or
|
|
+ received from the EC, dropped data, and communication timeouts.
|
|
+ Intended for development and debugging.
|
|
diff --git a/drivers/platform/x86/surface_aggregator/Makefile b/drivers/platform/x86/surface_aggregator/Makefile
|
|
new file mode 100644
|
|
index 000000000000..c8498c41e758
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/Makefile
|
|
@@ -0,0 +1,17 @@
|
|
+# SPDX-License-Identifier: GPL-2.0+
|
|
+# Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+
|
|
+# For include/trace/define_trace.h to include trace.h
|
|
+CFLAGS_core.o = -I$(src)
|
|
+
|
|
+obj-$(CONFIG_SURFACE_AGGREGATOR) += surface_aggregator.o
|
|
+
|
|
+surface_aggregator-objs := core.o
|
|
+surface_aggregator-objs += ssh_parser.o
|
|
+surface_aggregator-objs += ssh_packet_layer.o
|
|
+surface_aggregator-objs += ssh_request_layer.o
|
|
+surface_aggregator-objs += controller.o
|
|
+
|
|
+ifeq ($(CONFIG_SURFACE_AGGREGATOR_BUS),y)
|
|
+surface_aggregator-objs += bus.o
|
|
+endif
|
|
diff --git a/drivers/platform/x86/surface_aggregator/bus.c b/drivers/platform/x86/surface_aggregator/bus.c
|
|
new file mode 100644
|
|
index 000000000000..e525f34eb92c
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/bus.c
|
|
@@ -0,0 +1,415 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface System Aggregator Module bus and device integration.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/slab.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+#include "bus.h"
|
|
+#include "controller.h"
|
|
+
|
|
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
|
|
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
|
|
+ sdev->uid.instance, sdev->uid.function);
|
|
+}
|
|
+static DEVICE_ATTR_RO(modalias);
|
|
+
|
|
+static struct attribute *ssam_device_attrs[] = {
|
|
+ &dev_attr_modalias.attr,
|
|
+ NULL,
|
|
+};
|
|
+ATTRIBUTE_GROUPS(ssam_device);
|
|
+
|
|
+static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+
|
|
+ return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
|
|
+ sdev->uid.domain, sdev->uid.category,
|
|
+ sdev->uid.target, sdev->uid.instance,
|
|
+ sdev->uid.function);
|
|
+}
|
|
+
|
|
+static void ssam_device_release(struct device *dev)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+
|
|
+ ssam_controller_put(sdev->ctrl);
|
|
+ kfree(sdev);
|
|
+}
|
|
+
|
|
+const struct device_type ssam_device_type = {
|
|
+ .name = "surface_aggregator_device",
|
|
+ .groups = ssam_device_groups,
|
|
+ .uevent = ssam_device_uevent,
|
|
+ .release = ssam_device_release,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(ssam_device_type);
|
|
+
|
|
+/**
|
|
+ * ssam_device_alloc() - Allocate and initialize a SSAM client device.
|
|
+ * @ctrl: The controller under which the device should be added.
|
|
+ * @uid: The UID of the device to be added.
|
|
+ *
|
|
+ * Allocates and initializes a new client device. The parent of the device
|
|
+ * will be set to the controller device and the name will be set based on the
|
|
+ * UID. Note that the device still has to be added via ssam_device_add().
|
|
+ * Refer to that function for more details.
|
|
+ *
|
|
+ * Return: Returns the newly allocated and initialized SSAM client device, or
|
|
+ * %NULL if it could not be allocated.
|
|
+ */
|
|
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
|
|
+ struct ssam_device_uid uid)
|
|
+{
|
|
+ struct ssam_device *sdev;
|
|
+
|
|
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
|
|
+ if (!sdev)
|
|
+ return NULL;
|
|
+
|
|
+ device_initialize(&sdev->dev);
|
|
+ sdev->dev.bus = &ssam_bus_type;
|
|
+ sdev->dev.type = &ssam_device_type;
|
|
+ sdev->dev.parent = ssam_controller_device(ctrl);
|
|
+ sdev->ctrl = ssam_controller_get(ctrl);
|
|
+ sdev->uid = uid;
|
|
+
|
|
+ dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
|
|
+ sdev->uid.domain, sdev->uid.category, sdev->uid.target,
|
|
+ sdev->uid.instance, sdev->uid.function);
|
|
+
|
|
+ return sdev;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_alloc);
|
|
+
|
|
+/**
|
|
+ * ssam_device_add() - Add a SSAM client device.
|
|
+ * @sdev: The SSAM client device to be added.
|
|
+ *
|
|
+ * Added client devices must be guaranteed to always have a valid and active
|
|
+ * controller. Thus, this function will fail with %-ENODEV if the controller
|
|
+ * of the device has not been initialized yet, has been suspended, or has been
|
|
+ * shut down.
|
|
+ *
|
|
+ * The caller of this function should ensure that the corresponding call to
|
|
+ * ssam_device_remove() is issued before the controller is shut down. If the
|
|
+ * added device is a direct child of the controller device (default), it will
|
|
+ * be automatically removed when the controller is shut down.
|
|
+ *
|
|
+ * By default, the controller device will become the parent of the newly
|
|
+ * created client device. The parent may be changed before ssam_device_add is
|
|
+ * called, but care must be taken that a) the correct suspend/resume ordering
|
|
+ * is guaranteed and b) the client device does not outlive the controller,
|
|
+ * i.e. that the device is removed before the controller is being shut down.
|
|
+ * In case these guarantees have to be manually enforced, please refer to the
|
|
+ * ssam_client_link() and ssam_client_bind() functions, which are intended to
|
|
+ * set up device-links for this purpose.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssam_device_add(struct ssam_device *sdev)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Ensure that we can only add new devices to a controller if it has
|
|
+ * been started and is not going away soon. This works in combination
|
|
+ * with ssam_controller_remove_clients to ensure driver presence for the
|
|
+ * controller device, i.e. it ensures that the controller (sdev->ctrl)
|
|
+ * is always valid and can be used for requests as long as the client
|
|
+ * device we add here is registered as child under it. This essentially
|
|
+ * guarantees that the client driver can always expect the preconditions
|
|
+ * for functions like ssam_request_sync (controller has to be started
|
|
+ * and is not suspended) to hold and thus does not have to check for
|
|
+ * them.
|
|
+ *
|
|
+ * Note that for this to work, the controller has to be a parent device.
|
|
+ * If it is not a direct parent, care has to be taken that the device is
|
|
+ * removed via ssam_device_remove(), as device_unregister does not
|
|
+ * remove child devices recursively.
|
|
+ */
|
|
+ ssam_controller_statelock(sdev->ctrl);
|
|
+
|
|
+ if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
|
|
+ ssam_controller_stateunlock(sdev->ctrl);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ status = device_add(&sdev->dev);
|
|
+
|
|
+ ssam_controller_stateunlock(sdev->ctrl);
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_add);
|
|
+
|
|
+/**
|
|
+ * ssam_device_remove() - Remove a SSAM client device.
|
|
+ * @sdev: The device to remove.
|
|
+ *
|
|
+ * Removes and unregisters the provided SSAM client device.
|
|
+ */
|
|
+void ssam_device_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ device_unregister(&sdev->dev);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_remove);
|
|
+
|
|
+/**
|
|
+ * ssam_device_id_compatible() - Check if a device ID matches a UID.
|
|
+ * @id: The device ID as potential match.
|
|
+ * @uid: The device UID matching against.
|
|
+ *
|
|
+ * Check if the given ID is a match for the given UID, i.e. if a device with
|
|
+ * the provided UID is compatible to the given ID following the match rules
|
|
+ * described in its &ssam_device_id.match_flags member.
|
|
+ *
|
|
+ * Return: Returns %true if the given UID is compatible to the match rule
|
|
+ * described by the given ID, %false otherwise.
|
|
+ */
|
|
+static bool ssam_device_id_compatible(const struct ssam_device_id *id,
|
|
+ struct ssam_device_uid uid)
|
|
+{
|
|
+ if (id->domain != uid.domain || id->category != uid.category)
|
|
+ return false;
|
|
+
|
|
+ if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
|
|
+ return false;
|
|
+
|
|
+ if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
|
|
+ return false;
|
|
+
|
|
+ if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
|
|
+ return false;
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_device_id_is_null() - Check if a device ID is null.
|
|
+ * @id: The device ID to check.
|
|
+ *
|
|
+ * Check if a given device ID is null, i.e. all zeros. Used to check for the
|
|
+ * end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
|
|
+ *
|
|
+ * Return: Returns %true if the given ID represents a null ID, %false
|
|
+ * otherwise.
|
|
+ */
|
|
+static bool ssam_device_id_is_null(const struct ssam_device_id *id)
|
|
+{
|
|
+ return id->match_flags == 0 &&
|
|
+ id->domain == 0 &&
|
|
+ id->category == 0 &&
|
|
+ id->target == 0 &&
|
|
+ id->instance == 0 &&
|
|
+ id->function == 0 &&
|
|
+ id->driver_data == 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_device_id_match() - Find the matching ID table entry for the given UID.
|
|
+ * @table: The table to search in.
|
|
+ * @uid: The UID to matched against the individual table entries.
|
|
+ *
|
|
+ * Find the first match for the provided device UID in the provided ID table
|
|
+ * and return it. Returns %NULL if no match could be found.
|
|
+ */
|
|
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
|
|
+ const struct ssam_device_uid uid)
|
|
+{
|
|
+ const struct ssam_device_id *id;
|
|
+
|
|
+ for (id = table; !ssam_device_id_is_null(id); ++id)
|
|
+ if (ssam_device_id_compatible(id, uid))
|
|
+ return id;
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_id_match);
|
|
+
|
|
+/**
|
|
+ * ssam_device_get_match() - Find and return the ID matching the device in the
|
|
+ * ID table of the bound driver.
|
|
+ * @dev: The device for which to get the matching ID table entry.
|
|
+ *
|
|
+ * Find the fist match for the UID of the device in the ID table of the
|
|
+ * currently bound driver and return it. Returns %NULL if the device does not
|
|
+ * have a driver bound to it, the driver does not have match_table (i.e. it is
|
|
+ * %NULL), or there is no match in the driver's match_table.
|
|
+ *
|
|
+ * This function essentially calls ssam_device_id_match() with the ID table of
|
|
+ * the bound device driver and the UID of the device.
|
|
+ *
|
|
+ * Return: Returns the first match for the UID of the device in the device
|
|
+ * driver's match table, or %NULL if no such match could be found.
|
|
+ */
|
|
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
|
|
+{
|
|
+ const struct ssam_device_driver *sdrv;
|
|
+
|
|
+ sdrv = to_ssam_device_driver(dev->dev.driver);
|
|
+ if (!sdrv)
|
|
+ return NULL;
|
|
+
|
|
+ if (!sdrv->match_table)
|
|
+ return NULL;
|
|
+
|
|
+ return ssam_device_id_match(sdrv->match_table, dev->uid);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_get_match);
|
|
+
|
|
+/**
|
|
+ * ssam_device_get_match_data() - Find the ID matching the device in the
|
|
+ * ID table of the bound driver and return its ``driver_data`` member.
|
|
+ * @dev: The device for which to get the match data.
|
|
+ *
|
|
+ * Find the fist match for the UID of the device in the ID table of the
|
|
+ * corresponding driver and return its driver_data. Returns %NULL if the
|
|
+ * device does not have a driver bound to it, the driver does not have
|
|
+ * match_table (i.e. it is %NULL), there is no match in the driver's
|
|
+ * match_table, or the match does not have any driver_data.
|
|
+ *
|
|
+ * This function essentially calls ssam_device_get_match() and, if any match
|
|
+ * could be found, returns its ``struct ssam_device_id.driver_data`` member.
|
|
+ *
|
|
+ * Return: Returns the driver data associated with the first match for the UID
|
|
+ * of the device in the device driver's match table, or %NULL if no such match
|
|
+ * could be found.
|
|
+ */
|
|
+const void *ssam_device_get_match_data(const struct ssam_device *dev)
|
|
+{
|
|
+ const struct ssam_device_id *id;
|
|
+
|
|
+ id = ssam_device_get_match(dev);
|
|
+ if (!id)
|
|
+ return NULL;
|
|
+
|
|
+ return (const void *)id->driver_data;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
|
|
+
|
|
+static int ssam_bus_match(struct device *dev, struct device_driver *drv)
|
|
+{
|
|
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+
|
|
+ if (!is_ssam_device(dev))
|
|
+ return 0;
|
|
+
|
|
+ return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
|
|
+}
|
|
+
|
|
+static int ssam_bus_probe(struct device *dev)
|
|
+{
|
|
+ return to_ssam_device_driver(dev->driver)
|
|
+ ->probe(to_ssam_device(dev));
|
|
+}
|
|
+
|
|
+static int ssam_bus_remove(struct device *dev)
|
|
+{
|
|
+ struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
|
|
+
|
|
+ if (sdrv->remove)
|
|
+ sdrv->remove(to_ssam_device(dev));
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct bus_type ssam_bus_type = {
|
|
+ .name = "surface_aggregator",
|
|
+ .match = ssam_bus_match,
|
|
+ .probe = ssam_bus_probe,
|
|
+ .remove = ssam_bus_remove,
|
|
+};
|
|
+EXPORT_SYMBOL_GPL(ssam_bus_type);
|
|
+
|
|
+/**
|
|
+ * __ssam_device_driver_register() - Register a SSAM client device driver.
|
|
+ * @sdrv: The driver to register.
|
|
+ * @owner: The module owning the provided driver.
|
|
+ *
|
|
+ * Please refer to the ssam_device_driver_register() macro for the normal way
|
|
+ * to register a driver from inside its owning module.
|
|
+ */
|
|
+int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
|
|
+ struct module *owner)
|
|
+{
|
|
+ sdrv->driver.owner = owner;
|
|
+ sdrv->driver.bus = &ssam_bus_type;
|
|
+
|
|
+ /* force drivers to async probe so I/O is possible in probe */
|
|
+ sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
|
|
+
|
|
+ return driver_register(&sdrv->driver);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
|
|
+
|
|
+/**
|
|
+ * ssam_device_driver_unregister - Unregister a SSAM device driver.
|
|
+ * @sdrv: The driver to unregister.
|
|
+ */
|
|
+void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
|
|
+{
|
|
+ driver_unregister(&sdrv->driver);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
|
|
+
|
|
+static int ssam_remove_device(struct device *dev, void *_data)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+
|
|
+ if (is_ssam_device(dev))
|
|
+ ssam_device_remove(sdev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_remove_clients() - Remove SSAM client devices registered as
|
|
+ * direct children under the given controller.
|
|
+ * @ctrl: The controller to remove all direct clients for.
|
|
+ *
|
|
+ * Remove all SSAM client devices registered as direct children under the
|
|
+ * given controller. Note that this only accounts for direct children of the
|
|
+ * controller device. This does not take care of any client devices where the
|
|
+ * parent device has been manually set before calling ssam_device_add. Refer
|
|
+ * to ssam_device_add()/ssam_device_remove() for more details on those cases.
|
|
+ *
|
|
+ * To avoid new devices being added in parallel to this call, the main
|
|
+ * controller lock (not statelock) must be held during this (and if
|
|
+ * necessary, any subsequent deinitialization) call.
|
|
+ */
|
|
+void ssam_controller_remove_clients(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct device *dev;
|
|
+
|
|
+ dev = ssam_controller_device(ctrl);
|
|
+ device_for_each_child_reverse(dev, NULL, ssam_remove_device);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_bus_register() - Register and set-up the SSAM client device bus.
|
|
+ */
|
|
+int ssam_bus_register(void)
|
|
+{
|
|
+ return bus_register(&ssam_bus_type);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_bus_unregister() - Unregister the SSAM client device bus.
|
|
+ */
|
|
+void ssam_bus_unregister(void)
|
|
+{
|
|
+ return bus_unregister(&ssam_bus_type);
|
|
+}
|
|
diff --git a/drivers/platform/x86/surface_aggregator/bus.h b/drivers/platform/x86/surface_aggregator/bus.h
|
|
new file mode 100644
|
|
index 000000000000..ed032c2cbdb2
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/bus.h
|
|
@@ -0,0 +1,27 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Surface System Aggregator Module bus and device integration.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_BUS_H
|
|
+#define _SURFACE_AGGREGATOR_BUS_H
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
|
|
+
|
|
+void ssam_controller_remove_clients(struct ssam_controller *ctrl);
|
|
+
|
|
+int ssam_bus_register(void);
|
|
+void ssam_bus_unregister(void);
|
|
+
|
|
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
|
|
+
|
|
+static inline void ssam_controller_remove_clients(struct ssam_controller *ctrl) {}
|
|
+static inline int ssam_bus_register(void) { return 0; }
|
|
+static inline void ssam_bus_unregister(void) {}
|
|
+
|
|
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
|
|
+#endif /* _SURFACE_AGGREGATOR_BUS_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/controller.c b/drivers/platform/x86/surface_aggregator/controller.c
|
|
new file mode 100644
|
|
index 000000000000..caf76333f8b3
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/controller.c
|
|
@@ -0,0 +1,2780 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Main SSAM/SSH controller structure and functionality.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/completion.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
+#include <linux/interrupt.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/limits.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/lockdep.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/rculist.h>
|
|
+#include <linux/rbtree.h>
|
|
+#include <linux/rwsem.h>
|
|
+#include <linux/serdev.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/srcu.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+#include "controller.h"
|
|
+#include "ssh_msgb.h"
|
|
+#include "ssh_request_layer.h"
|
|
+
|
|
+#include "trace.h"
|
|
+
|
|
+
|
|
+/* -- Safe counters. -------------------------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * ssh_seq_reset() - Reset/initialize sequence ID counter.
|
|
+ * @c: The counter to reset.
|
|
+ */
|
|
+static void ssh_seq_reset(struct ssh_seq_counter *c)
|
|
+{
|
|
+ WRITE_ONCE(c->value, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_seq_next() - Get next sequence ID.
|
|
+ * @c: The counter providing the sequence IDs.
|
|
+ *
|
|
+ * Return: Returns the next sequence ID of the counter.
|
|
+ */
|
|
+static u8 ssh_seq_next(struct ssh_seq_counter *c)
|
|
+{
|
|
+ u8 old = READ_ONCE(c->value);
|
|
+ u8 new = old + 1;
|
|
+ u8 ret;
|
|
+
|
|
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
|
|
+ old = ret;
|
|
+ new = old + 1;
|
|
+ }
|
|
+
|
|
+ return old;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rqid_reset() - Reset/initialize request ID counter.
|
|
+ * @c: The counter to reset.
|
|
+ */
|
|
+static void ssh_rqid_reset(struct ssh_rqid_counter *c)
|
|
+{
|
|
+ WRITE_ONCE(c->value, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rqid_next() - Get next request ID.
|
|
+ * @c: The counter providing the request IDs.
|
|
+ *
|
|
+ * Return: Returns the next request ID of the counter, skipping any reserved
|
|
+ * request IDs.
|
|
+ */
|
|
+static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
|
|
+{
|
|
+ u16 old = READ_ONCE(c->value);
|
|
+ u16 new = ssh_rqid_next_valid(old);
|
|
+ u16 ret;
|
|
+
|
|
+ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
|
|
+ old = ret;
|
|
+ new = ssh_rqid_next_valid(old);
|
|
+ }
|
|
+
|
|
+ return old;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Event notifier/callbacks. --------------------------------------------- */
|
|
+/*
|
|
+ * The notifier system is based on linux/notifier.h, specifically the SRCU
|
|
+ * implementation. The difference to that is, that some bits of the notifier
|
|
+ * call return value can be tracked across multiple calls. This is done so
|
|
+ * that handling of events can be tracked and a warning can be issued in case
|
|
+ * an event goes unhandled. The idea of that warning is that it should help
|
|
+ * discover and identify new/currently unimplemented features.
|
|
+ */
|
|
+
|
|
+/**
|
|
+ * ssam_event_matches_notifier() - Test if an event matches a notifier.
|
|
+ * @n: The event notifier to test against.
|
|
+ * @event: The event to test.
|
|
+ *
|
|
+ * Return: Returns %true if the given event matches the given notifier
|
|
+ * according to the rules set in the notifier's event mask, %false otherwise.
|
|
+ */
|
|
+static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
|
|
+ const struct ssam_event *event)
|
|
+{
|
|
+ bool match = n->event.id.target_category == event->target_category;
|
|
+
|
|
+ if (n->event.mask & SSAM_EVENT_MASK_TARGET)
|
|
+ match &= n->event.reg.target_id == event->target_id;
|
|
+
|
|
+ if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
|
|
+ match &= n->event.id.instance == event->instance_id;
|
|
+
|
|
+ return match;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
|
|
+ * @nh: The notifier head for which the notifier callbacks should be called.
|
|
+ * @event: The event data provided to the callbacks.
|
|
+ *
|
|
+ * Call all registered notifier callbacks in order of their priority until
|
|
+ * either no notifier is left or a notifier returns a value with the
|
|
+ * %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
|
|
+ * ssam_notifier_from_errno() on any non-zero error value.
|
|
+ *
|
|
+ * Return: Returns the notifier status value, which contains the notifier
|
|
+ * status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
|
|
+ * potential error value returned from the last executed notifier callback.
|
|
+ * Use ssam_notifier_to_errno() to convert this value to the original error
|
|
+ * value.
|
|
+ */
|
|
+static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
|
|
+{
|
|
+ struct ssam_event_notifier *nf;
|
|
+ int ret = 0, idx;
|
|
+
|
|
+ idx = srcu_read_lock(&nh->srcu);
|
|
+
|
|
+ list_for_each_entry_rcu(nf, &nh->head, base.node) {
|
|
+ if (ssam_event_matches_notifier(nf, event)) {
|
|
+ ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
|
|
+ if (ret & SSAM_NOTIF_STOP)
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ srcu_read_unlock(&nh->srcu, idx);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nfblk_insert() - Insert a new notifier block into the given notifier
|
|
+ * list.
|
|
+ * @nh: The notifier head into which the block should be inserted.
|
|
+ * @nb: The notifier block to add.
|
|
+ *
|
|
+ * Note: This function must be synchronized by the caller with respect to other
|
|
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-EEXIST if the notifier block has already
|
|
+ * been registered.
|
|
+ */
|
|
+static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
|
|
+{
|
|
+ struct ssam_notifier_block *p;
|
|
+ struct list_head *h;
|
|
+
|
|
+ /* Runs under lock, no need for RCU variant. */
|
|
+ list_for_each(h, &nh->head) {
|
|
+ p = list_entry(h, struct ssam_notifier_block, node);
|
|
+
|
|
+ if (unlikely(p == nb)) {
|
|
+ WARN(1, "double register detected");
|
|
+ return -EEXIST;
|
|
+ }
|
|
+
|
|
+ if (nb->priority > p->priority)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ list_add_tail_rcu(&nb->node, h);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nfblk_find() - Check if a notifier block is registered on the given
|
|
+ * notifier head.
|
|
+ * list.
|
|
+ * @nh: The notifier head on which to search.
|
|
+ * @nb: The notifier block to search for.
|
|
+ *
|
|
+ * Note: This function must be synchronized by the caller with respect to other
|
|
+ * insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
|
|
+ *
|
|
+ * Return: Returns true if the given notifier block is registered on the given
|
|
+ * notifier head, false otherwise.
|
|
+ */
|
|
+static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
|
|
+{
|
|
+ struct ssam_notifier_block *p;
|
|
+
|
|
+ /* Runs under lock, no need for RCU variant. */
|
|
+ list_for_each_entry(p, &nh->head, node) {
|
|
+ if (p == nb)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nfblk_remove() - Remove a notifier block from its notifier list.
|
|
+ * @nb: The notifier block to be removed.
|
|
+ *
|
|
+ * Note: This function must be synchronized by the caller with respect to
|
|
+ * other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
|
|
+ * Furthermore, the caller _must_ ensure SRCU synchronization by calling
|
|
+ * synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
|
|
+ * ensure that the removed notifier block is not in use any more.
|
|
+ */
|
|
+static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
|
|
+{
|
|
+ list_del_rcu(&nb->node);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_head_init() - Initialize the given notifier head.
|
|
+ * @nh: The notifier head to initialize.
|
|
+ */
|
|
+static int ssam_nf_head_init(struct ssam_nf_head *nh)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = init_srcu_struct(&nh->srcu);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ INIT_LIST_HEAD(&nh->head);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_head_destroy() - Deinitialize the given notifier head.
|
|
+ * @nh: The notifier head to deinitialize.
|
|
+ */
|
|
+static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
|
|
+{
|
|
+ cleanup_srcu_struct(&nh->srcu);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Event/notification registry. ------------------------------------------ */
|
|
+
|
|
+/**
|
|
+ * struct ssam_nf_refcount_key - Key used for event activation reference
|
|
+ * counting.
|
|
+ * @reg: The registry via which the event is enabled/disabled.
|
|
+ * @id: The ID uniquely describing the event.
|
|
+ */
|
|
+struct ssam_nf_refcount_key {
|
|
+ struct ssam_event_registry reg;
|
|
+ struct ssam_event_id id;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
|
|
+ * activations.
|
|
+ * @node: The node of this entry in the rb-tree.
|
|
+ * @key: The key of the event.
|
|
+ * @refcount: The reference-count of the event.
|
|
+ * @flags: The flags used when enabling the event.
|
|
+ */
|
|
+struct ssam_nf_refcount_entry {
|
|
+ struct rb_node node;
|
|
+ struct ssam_nf_refcount_key key;
|
|
+ int refcount;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
|
|
+ * event.
|
|
+ * @nf: The notifier system reference.
|
|
+ * @reg: The registry used to enable/disable the event.
|
|
+ * @id: The event ID.
|
|
+ *
|
|
+ * Increments the reference-/activation-count associated with the specified
|
|
+ * event type/ID, allocating a new entry for this event ID if necessary. A
|
|
+ * newly allocated entry will have a refcount of one.
|
|
+ *
|
|
+ * Note: ``nf->lock`` must be held when calling this function.
|
|
+ *
|
|
+ * Return: Returns the refcount entry on success. Returns an error pointer
|
|
+ * with %-ENOSPC if there have already been %INT_MAX events of the specified
|
|
+ * ID and type registered, or %-ENOMEM if the entry could not be allocated.
|
|
+ */
|
|
+static struct ssam_nf_refcount_entry *
|
|
+ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id)
|
|
+{
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+ struct ssam_nf_refcount_key key;
|
|
+ struct rb_node **link = &nf->refcount.rb_node;
|
|
+ struct rb_node *parent = NULL;
|
|
+ int cmp;
|
|
+
|
|
+ lockdep_assert_held(&nf->lock);
|
|
+
|
|
+ key.reg = reg;
|
|
+ key.id = id;
|
|
+
|
|
+ while (*link) {
|
|
+ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
|
|
+ parent = *link;
|
|
+
|
|
+ cmp = memcmp(&key, &entry->key, sizeof(key));
|
|
+ if (cmp < 0) {
|
|
+ link = &(*link)->rb_left;
|
|
+ } else if (cmp > 0) {
|
|
+ link = &(*link)->rb_right;
|
|
+ } else if (entry->refcount < INT_MAX) {
|
|
+ entry->refcount++;
|
|
+ return entry;
|
|
+ } else {
|
|
+ WARN_ON(1);
|
|
+ return ERR_PTR(-ENOSPC);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
|
+ if (!entry)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ entry->key = key;
|
|
+ entry->refcount = 1;
|
|
+
|
|
+ rb_link_node(&entry->node, parent, link);
|
|
+ rb_insert_color(&entry->node, &nf->refcount);
|
|
+
|
|
+ return entry;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
|
|
+ * event.
|
|
+ * @nf: The notifier system reference.
|
|
+ * @reg: The registry used to enable/disable the event.
|
|
+ * @id: The event ID.
|
|
+ *
|
|
+ * Decrements the reference-/activation-count of the specified event,
|
|
+ * returning its entry. If the returned entry has a refcount of zero, the
|
|
+ * caller is responsible for freeing it using kfree().
|
|
+ *
|
|
+ * Note: ``nf->lock`` must be held when calling this function.
|
|
+ *
|
|
+ * Return: Returns the refcount entry on success or %NULL if the entry has not
|
|
+ * been found.
|
|
+ */
|
|
+static struct ssam_nf_refcount_entry *
|
|
+ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id)
|
|
+{
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+ struct ssam_nf_refcount_key key;
|
|
+ struct rb_node *node = nf->refcount.rb_node;
|
|
+ int cmp;
|
|
+
|
|
+ lockdep_assert_held(&nf->lock);
|
|
+
|
|
+ key.reg = reg;
|
|
+ key.id = id;
|
|
+
|
|
+ while (node) {
|
|
+ entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
|
|
+
|
|
+ cmp = memcmp(&key, &entry->key, sizeof(key));
|
|
+ if (cmp < 0) {
|
|
+ node = node->rb_left;
|
|
+ } else if (cmp > 0) {
|
|
+ node = node->rb_right;
|
|
+ } else {
|
|
+ entry->refcount--;
|
|
+ if (entry->refcount == 0)
|
|
+ rb_erase(&entry->node, &nf->refcount);
|
|
+
|
|
+ return entry;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_dec_free() - Decrement reference-/activation-count of the
|
|
+ * given event and free its entry if the reference count reaches zero.
|
|
+ * @nf: The notifier system reference.
|
|
+ * @reg: The registry used to enable/disable the event.
|
|
+ * @id: The event ID.
|
|
+ *
|
|
+ * Decrements the reference-/activation-count of the specified event, freeing
|
|
+ * its entry if it reaches zero.
|
|
+ *
|
|
+ * Note: ``nf->lock`` must be held when calling this function.
|
|
+ */
|
|
+static void ssam_nf_refcount_dec_free(struct ssam_nf *nf,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id)
|
|
+{
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+
|
|
+ lockdep_assert_held(&nf->lock);
|
|
+
|
|
+ entry = ssam_nf_refcount_dec(nf, reg, id);
|
|
+ if (entry && entry->refcount == 0)
|
|
+ kfree(entry);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_empty() - Test if the notification system has any
|
|
+ * enabled/active events.
|
|
+ * @nf: The notification system.
|
|
+ */
|
|
+static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
|
|
+{
|
|
+ return RB_EMPTY_ROOT(&nf->refcount);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_call() - Call notification callbacks for the provided event.
|
|
+ * @nf: The notifier system
|
|
+ * @dev: The associated device, only used for logging.
|
|
+ * @rqid: The request ID of the event.
|
|
+ * @event: The event provided to the callbacks.
|
|
+ *
|
|
+ * Execute registered callbacks in order of their priority until either no
|
|
+ * callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
|
|
+ * bit set. Note that this bit is set automatically when converting non-zero
|
|
+ * error values via ssam_notifier_from_errno() to notifier values.
|
|
+ *
|
|
+ * Also note that any callback that could handle an event should return a value
|
|
+ * with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
|
|
+ * unhandled/ignored. In case no registered callback could handle an event,
|
|
+ * this function will emit a warning.
|
|
+ *
|
|
+ * In case a callback failed, this function will emit an error message.
|
|
+ */
|
|
+static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
|
|
+ struct ssam_event *event)
|
|
+{
|
|
+ struct ssam_nf_head *nf_head;
|
|
+ int status, nf_ret;
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid)) {
|
|
+ dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
|
|
+ nf_ret = ssam_nfblk_call_chain(nf_head, event);
|
|
+ status = ssam_notifier_to_errno(nf_ret);
|
|
+
|
|
+ if (status < 0) {
|
|
+ dev_err(dev,
|
|
+ "event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
|
|
+ status, event->target_category, event->target_id,
|
|
+ event->command_id, event->instance_id);
|
|
+ } else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
|
|
+ dev_warn(dev,
|
|
+ "event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
|
|
+ rqid, event->target_category, event->target_id,
|
|
+ event->command_id, event->instance_id);
|
|
+ }
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_init() - Initialize the notifier system.
|
|
+ * @nf: The notifier system to initialize.
|
|
+ */
|
|
+static int ssam_nf_init(struct ssam_nf *nf)
|
|
+{
|
|
+ int i, status;
|
|
+
|
|
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
|
|
+ status = ssam_nf_head_init(&nf->head[i]);
|
|
+ if (status)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (status) {
|
|
+ while (i--)
|
|
+ ssam_nf_head_destroy(&nf->head[i]);
|
|
+
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ mutex_init(&nf->lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_destroy() - Deinitialize the notifier system.
|
|
+ * @nf: The notifier system to deinitialize.
|
|
+ */
|
|
+static void ssam_nf_destroy(struct ssam_nf *nf)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
|
|
+ ssam_nf_head_destroy(&nf->head[i]);
|
|
+
|
|
+ mutex_destroy(&nf->lock);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Event/async request completion system. -------------------------------- */
|
|
+
|
|
+#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
|
|
+
|
|
+/*
|
|
+ * SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
|
|
+ * work execution. Used to prevent livelocking of the workqueue. Value chosen
|
|
+ * via educated guess, may be adjusted.
|
|
+ */
|
|
+#define SSAM_CPLT_WQ_BATCH 10
|
|
+
|
|
+/*
|
|
+ * SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
|
|
+ * &struct ssam_event_item.
|
|
+ *
|
|
+ * This length has been chosen to be accommodate standard touchpad and
|
|
+ * keyboard input events. Events with larger payloads will be allocated
|
|
+ * separately.
|
|
+ */
|
|
+#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
|
|
+
|
|
+static struct kmem_cache *ssam_event_item_cache;
|
|
+
|
|
+/**
|
|
+ * ssam_event_item_cache_init() - Initialize the event item cache.
|
|
+ */
|
|
+int ssam_event_item_cache_init(void)
|
|
+{
|
|
+ const unsigned int size = sizeof(struct ssam_event_item)
|
|
+ + SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
|
|
+ const unsigned int align = __alignof__(struct ssam_event_item);
|
|
+ struct kmem_cache *cache;
|
|
+
|
|
+ cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
|
|
+ if (!cache)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ssam_event_item_cache = cache;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_item_cache_destroy() - Deinitialize the event item cache.
|
|
+ */
|
|
+void ssam_event_item_cache_destroy(void)
|
|
+{
|
|
+ kmem_cache_destroy(ssam_event_item_cache);
|
|
+ ssam_event_item_cache = NULL;
|
|
+}
|
|
+
|
|
+static void __ssam_event_item_free_cached(struct ssam_event_item *item)
|
|
+{
|
|
+ kmem_cache_free(ssam_event_item_cache, item);
|
|
+}
|
|
+
|
|
+static void __ssam_event_item_free_generic(struct ssam_event_item *item)
|
|
+{
|
|
+ kfree(item);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_item_free() - Free the provided event item.
|
|
+ * @item: The event item to free.
|
|
+ */
|
|
+static void ssam_event_item_free(struct ssam_event_item *item)
|
|
+{
|
|
+ trace_ssam_event_item_free(item);
|
|
+ item->ops.free(item);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_item_alloc() - Allocate an event item with the given payload size.
|
|
+ * @len: The event payload length.
|
|
+ * @flags: The flags used for allocation.
|
|
+ *
|
|
+ * Allocate an event item with the given payload size, preferring allocation
|
|
+ * from the event item cache if the payload is small enough (i.e. smaller than
|
|
+ * %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
|
|
+ * length values. The item free callback (``ops.free``) should not be
|
|
+ * overwritten after this call.
|
|
+ *
|
|
+ * Return: Returns the newly allocated event item.
|
|
+ */
|
|
+static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
|
|
+{
|
|
+ struct ssam_event_item *item;
|
|
+
|
|
+ if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
|
|
+ item = kmem_cache_alloc(ssam_event_item_cache, flags);
|
|
+ if (!item)
|
|
+ return NULL;
|
|
+
|
|
+ item->ops.free = __ssam_event_item_free_cached;
|
|
+ } else {
|
|
+ item = kzalloc(struct_size(item, event.data, len), flags);
|
|
+ if (!item)
|
|
+ return NULL;
|
|
+
|
|
+ item->ops.free = __ssam_event_item_free_generic;
|
|
+ }
|
|
+
|
|
+ item->event.length = len;
|
|
+
|
|
+ trace_ssam_event_item_alloc(item, len);
|
|
+ return item;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_queue_push() - Push an event item to the event queue.
|
|
+ * @q: The event queue.
|
|
+ * @item: The item to add.
|
|
+ */
|
|
+static void ssam_event_queue_push(struct ssam_event_queue *q,
|
|
+ struct ssam_event_item *item)
|
|
+{
|
|
+ spin_lock(&q->lock);
|
|
+ list_add_tail(&item->node, &q->head);
|
|
+ spin_unlock(&q->lock);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_queue_pop() - Pop the next event item from the event queue.
|
|
+ * @q: The event queue.
|
|
+ *
|
|
+ * Returns and removes the next event item from the queue. Returns %NULL If
|
|
+ * there is no event item left.
|
|
+ */
|
|
+static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
|
|
+{
|
|
+ struct ssam_event_item *item;
|
|
+
|
|
+ spin_lock(&q->lock);
|
|
+ item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
|
|
+ if (item)
|
|
+ list_del(&item->node);
|
|
+ spin_unlock(&q->lock);
|
|
+
|
|
+ return item;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_queue_is_empty() - Check if the event queue is empty.
|
|
+ * @q: The event queue.
|
|
+ */
|
|
+static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
|
|
+{
|
|
+ bool empty;
|
|
+
|
|
+ spin_lock(&q->lock);
|
|
+ empty = list_empty(&q->head);
|
|
+ spin_unlock(&q->lock);
|
|
+
|
|
+ return empty;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
|
|
+ * @cplt: The completion system on which to look for the queue.
|
|
+ * @tid: The target ID of the queue.
|
|
+ * @rqid: The request ID representing the event ID for which to get the queue.
|
|
+ *
|
|
+ * Return: Returns the event queue corresponding to the event type described
|
|
+ * by the given parameters. If the request ID does not represent an event,
|
|
+ * this function returns %NULL. If the target ID is not supported, this
|
|
+ * function will fall back to the default target ID (``tid = 1``).
|
|
+ */
|
|
+static
|
|
+struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
|
|
+ u8 tid, u16 rqid)
|
|
+{
|
|
+ u16 event = ssh_rqid_to_event(rqid);
|
|
+ u16 tidx = ssh_tid_to_index(tid);
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid)) {
|
|
+ dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (!ssh_tid_is_valid(tid)) {
|
|
+ dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
|
|
+ tidx = 0;
|
|
+ }
|
|
+
|
|
+ return &cplt->event.target[tidx].queue[event];
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_submit() - Submit a work item to the completion system workqueue.
|
|
+ * @cplt: The completion system.
|
|
+ * @work: The work item to submit.
|
|
+ */
|
|
+static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
|
|
+{
|
|
+ return queue_work(cplt->wq, work);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_submit_event() - Submit an event to the completion system.
|
|
+ * @cplt: The completion system.
|
|
+ * @item: The event item to submit.
|
|
+ *
|
|
+ * Submits the event to the completion system by queuing it on the event item
|
|
+ * queue and queuing the respective event queue work item on the completion
|
|
+ * workqueue, which will eventually complete the event.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-EINVAL if there is no event queue that
|
|
+ * can handle the given event item.
|
|
+ */
|
|
+static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
|
|
+ struct ssam_event_item *item)
|
|
+{
|
|
+ struct ssam_event_queue *evq;
|
|
+
|
|
+ evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
|
|
+ if (!evq)
|
|
+ return -EINVAL;
|
|
+
|
|
+ ssam_event_queue_push(evq, item);
|
|
+ ssam_cplt_submit(cplt, &evq->work);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_flush() - Flush the completion system.
|
|
+ * @cplt: The completion system.
|
|
+ *
|
|
+ * Flush the completion system by waiting until all currently submitted work
|
|
+ * items have been completed.
|
|
+ *
|
|
+ * Note: This function does not guarantee that all events will have been
|
|
+ * handled once this call terminates. In case of a larger number of
|
|
+ * to-be-completed events, the event queue work function may re-schedule its
|
|
+ * work item, which this flush operation will ignore.
|
|
+ *
|
|
+ * This operation is only intended to, during normal operation prior to
|
|
+ * shutdown, try to complete most events and requests to get them out of the
|
|
+ * system while the system is still fully operational. It does not aim to
|
|
+ * provide any guarantee that all of them have been handled.
|
|
+ */
|
|
+static void ssam_cplt_flush(struct ssam_cplt *cplt)
|
|
+{
|
|
+ flush_workqueue(cplt->wq);
|
|
+}
|
|
+
|
|
+static void ssam_event_queue_work_fn(struct work_struct *work)
|
|
+{
|
|
+ struct ssam_event_queue *queue;
|
|
+ struct ssam_event_item *item;
|
|
+ struct ssam_nf *nf;
|
|
+ struct device *dev;
|
|
+ unsigned int iterations = SSAM_CPLT_WQ_BATCH;
|
|
+
|
|
+ queue = container_of(work, struct ssam_event_queue, work);
|
|
+ nf = &queue->cplt->event.notif;
|
|
+ dev = queue->cplt->dev;
|
|
+
|
|
+ /* Limit number of processed events to avoid livelocking. */
|
|
+ do {
|
|
+ item = ssam_event_queue_pop(queue);
|
|
+ if (!item)
|
|
+ return;
|
|
+
|
|
+ ssam_nf_call(nf, dev, item->rqid, &item->event);
|
|
+ ssam_event_item_free(item);
|
|
+ } while (--iterations);
|
|
+
|
|
+ if (!ssam_event_queue_is_empty(queue))
|
|
+ ssam_cplt_submit(queue->cplt, &queue->work);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_event_queue_init() - Initialize an event queue.
|
|
+ * @cplt: The completion system on which the queue resides.
|
|
+ * @evq: The event queue to initialize.
|
|
+ */
|
|
+static void ssam_event_queue_init(struct ssam_cplt *cplt,
|
|
+ struct ssam_event_queue *evq)
|
|
+{
|
|
+ evq->cplt = cplt;
|
|
+ spin_lock_init(&evq->lock);
|
|
+ INIT_LIST_HEAD(&evq->head);
|
|
+ INIT_WORK(&evq->work, ssam_event_queue_work_fn);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_init() - Initialize completion system.
|
|
+ * @cplt: The completion system to initialize.
|
|
+ * @dev: The device used for logging.
|
|
+ */
|
|
+static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
|
|
+{
|
|
+ struct ssam_event_target *target;
|
|
+ int status, c, i;
|
|
+
|
|
+ cplt->dev = dev;
|
|
+
|
|
+ cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
|
|
+ if (!cplt->wq)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
|
|
+ target = &cplt->event.target[c];
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(target->queue); i++)
|
|
+ ssam_event_queue_init(cplt, &target->queue[i]);
|
|
+ }
|
|
+
|
|
+ status = ssam_nf_init(&cplt->event.notif);
|
|
+ if (status)
|
|
+ destroy_workqueue(cplt->wq);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_cplt_destroy() - Deinitialize the completion system.
|
|
+ * @cplt: The completion system to deinitialize.
|
|
+ *
|
|
+ * Deinitialize the given completion system and ensure that all pending, i.e.
|
|
+ * yet-to-be-completed, event items and requests have been handled.
|
|
+ */
|
|
+static void ssam_cplt_destroy(struct ssam_cplt *cplt)
|
|
+{
|
|
+ /*
|
|
+ * Note: destroy_workqueue ensures that all currently queued work will
|
|
+ * be fully completed and the workqueue drained. This means that this
|
|
+ * call will inherently also free any queued ssam_event_items, thus we
|
|
+ * don't have to take care of that here explicitly.
|
|
+ */
|
|
+ destroy_workqueue(cplt->wq);
|
|
+ ssam_nf_destroy(&cplt->event.notif);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Main SSAM device structures. ------------------------------------------ */
|
|
+
|
|
+/**
|
|
+ * ssam_controller_device() - Get the &struct device associated with this
|
|
+ * controller.
|
|
+ * @c: The controller for which to get the device.
|
|
+ *
|
|
+ * Return: Returns the &struct device associated with this controller,
|
|
+ * providing its lower-level transport.
|
|
+ */
|
|
+struct device *ssam_controller_device(struct ssam_controller *c)
|
|
+{
|
|
+ return ssh_rtl_get_device(&c->rtl);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_device);
|
|
+
|
|
+static void __ssam_controller_release(struct kref *kref)
|
|
+{
|
|
+ struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
|
|
+
|
|
+ /*
|
|
+ * The lock-call here is to satisfy lockdep. At this point we really
|
|
+ * expect this to be the last remaining reference to the controller.
|
|
+ * Anything else is a bug.
|
|
+ */
|
|
+ ssam_controller_lock(ctrl);
|
|
+ ssam_controller_destroy(ctrl);
|
|
+ ssam_controller_unlock(ctrl);
|
|
+
|
|
+ kfree(ctrl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_get() - Increment reference count of controller.
|
|
+ * @c: The controller.
|
|
+ *
|
|
+ * Return: Returns the controller provided as input.
|
|
+ */
|
|
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
|
|
+{
|
|
+ if (c)
|
|
+ kref_get(&c->kref);
|
|
+ return c;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_get);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_put() - Decrement reference count of controller.
|
|
+ * @c: The controller.
|
|
+ */
|
|
+void ssam_controller_put(struct ssam_controller *c)
|
|
+{
|
|
+ if (c)
|
|
+ kref_put(&c->kref, __ssam_controller_release);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_put);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_statelock() - Lock the controller against state transitions.
|
|
+ * @c: The controller to lock.
|
|
+ *
|
|
+ * Lock the controller against state transitions. Holding this lock guarantees
|
|
+ * that the controller will not transition between states, i.e. if the
|
|
+ * controller is in state "started", when this lock has been acquired, it will
|
|
+ * remain in this state at least until the lock has been released.
|
|
+ *
|
|
+ * Multiple clients may concurrently hold this lock. In other words: The
|
|
+ * ``statelock`` functions represent the read-lock part of a r/w-semaphore.
|
|
+ * Actions causing state transitions of the controller must be executed while
|
|
+ * holding the write-part of this r/w-semaphore (see ssam_controller_lock()
|
|
+ * and ssam_controller_unlock() for that).
|
|
+ *
|
|
+ * See ssam_controller_stateunlock() for the corresponding unlock function.
|
|
+ */
|
|
+void ssam_controller_statelock(struct ssam_controller *c)
|
|
+{
|
|
+ down_read(&c->lock);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_statelock);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_stateunlock() - Unlock controller state transitions.
|
|
+ * @c: The controller to unlock.
|
|
+ *
|
|
+ * See ssam_controller_statelock() for the corresponding lock function.
|
|
+ */
|
|
+void ssam_controller_stateunlock(struct ssam_controller *c)
|
|
+{
|
|
+ up_read(&c->lock);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_lock() - Acquire the main controller lock.
|
|
+ * @c: The controller to lock.
|
|
+ *
|
|
+ * This lock must be held for any state transitions, including transition to
|
|
+ * suspend/resumed states and during shutdown. See ssam_controller_statelock()
|
|
+ * for more details on controller locking.
|
|
+ *
|
|
+ * See ssam_controller_unlock() for the corresponding unlock function.
|
|
+ */
|
|
+void ssam_controller_lock(struct ssam_controller *c)
|
|
+{
|
|
+ down_write(&c->lock);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_unlock() - Release the main controller lock.
|
|
+ * @c: The controller to unlock.
|
|
+ *
|
|
+ * See ssam_controller_lock() for the corresponding lock function.
|
|
+ */
|
|
+void ssam_controller_unlock(struct ssam_controller *c)
|
|
+{
|
|
+ up_write(&c->lock);
|
|
+}
|
|
+
|
|
+static void ssam_handle_event(struct ssh_rtl *rtl,
|
|
+ const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data)
|
|
+{
|
|
+ struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
|
|
+ struct ssam_event_item *item;
|
|
+
|
|
+ item = ssam_event_item_alloc(data->len, GFP_KERNEL);
|
|
+ if (!item)
|
|
+ return;
|
|
+
|
|
+ item->rqid = get_unaligned_le16(&cmd->rqid);
|
|
+ item->event.target_category = cmd->tc;
|
|
+ item->event.target_id = cmd->tid_in;
|
|
+ item->event.command_id = cmd->cid;
|
|
+ item->event.instance_id = cmd->iid;
|
|
+ memcpy(&item->event.data[0], data->ptr, data->len);
|
|
+
|
|
+ if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
|
|
+ ssam_event_item_free(item);
|
|
+}
|
|
+
|
|
+static const struct ssh_rtl_ops ssam_rtl_ops = {
|
|
+ .handle_event = ssam_handle_event,
|
|
+};
|
|
+
|
|
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl);
|
|
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
|
|
+
|
|
+#define SSAM_SSH_DSM_REVISION 0
|
|
+
|
|
+/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */
|
|
+static const guid_t SSAM_SSH_DSM_GUID =
|
|
+ GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
|
|
+ 0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
|
|
+
|
|
+enum ssh_dsm_fn {
|
|
+ SSH_DSM_FN_SSH_POWER_PROFILE = 0x05,
|
|
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT = 0x06,
|
|
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
|
|
+ SSH_DSM_FN_D3_CLOSES_HANDLE = 0x08,
|
|
+ SSH_DSM_FN_SSH_BUFFER_SIZE = 0x09,
|
|
+};
|
|
+
|
|
+static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
|
|
+{
|
|
+ union acpi_object *obj;
|
|
+ u64 mask = 0;
|
|
+ int i;
|
|
+
|
|
+ *funcs = 0;
|
|
+
|
|
+ /*
|
|
+ * The _DSM function is only present on newer models. It is not
|
|
+ * present on 5th and 6th generation devices (i.e. up to and including
|
|
+ * Surface Pro 6, Surface Laptop 2, Surface Book 2).
|
|
+ *
|
|
+ * If the _DSM is not present, indicate that no function is supported.
|
|
+ * This will result in default values being set.
|
|
+ */
|
|
+ if (!acpi_has_method(handle, "_DSM"))
|
|
+ return 0;
|
|
+
|
|
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
|
|
+ SSAM_SSH_DSM_REVISION, 0, NULL,
|
|
+ ACPI_TYPE_BUFFER);
|
|
+ if (!obj)
|
|
+ return -EIO;
|
|
+
|
|
+ for (i = 0; i < obj->buffer.length && i < 8; i++)
|
|
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
|
|
+
|
|
+ if (mask & BIT(0))
|
|
+ *funcs = mask;
|
|
+
|
|
+ ACPI_FREE(obj);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
|
|
+{
|
|
+ union acpi_object *obj;
|
|
+ u64 val;
|
|
+
|
|
+ if (!(funcs & BIT_ULL(func)))
|
|
+ return 0; /* Not supported, leave *ret at its default value */
|
|
+
|
|
+ obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
|
|
+ SSAM_SSH_DSM_REVISION, func, NULL,
|
|
+ ACPI_TYPE_INTEGER);
|
|
+ if (!obj)
|
|
+ return -EIO;
|
|
+
|
|
+ val = obj->integer.value;
|
|
+ ACPI_FREE(obj);
|
|
+
|
|
+ if (val > U32_MAX)
|
|
+ return -ERANGE;
|
|
+
|
|
+ *ret = val;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_caps_load_from_acpi() - Load controller capabilities from
|
|
+ * ACPI _DSM.
|
|
+ * @handle: The handle of the ACPI controller/SSH device.
|
|
+ * @caps: Where to store the capabilities in.
|
|
+ *
|
|
+ * Initializes the given controller capabilities with default values, then
|
|
+ * checks and, if the respective _DSM functions are available, loads the
|
|
+ * actual capabilities from the _DSM.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+static
|
|
+int ssam_controller_caps_load_from_acpi(acpi_handle handle,
|
|
+ struct ssam_controller_caps *caps)
|
|
+{
|
|
+ u32 d3_closes_handle = false;
|
|
+ u64 funcs;
|
|
+ int status;
|
|
+
|
|
+ /* Set defaults. */
|
|
+ caps->ssh_power_profile = U32_MAX;
|
|
+ caps->screen_on_sleep_idle_timeout = U32_MAX;
|
|
+ caps->screen_off_sleep_idle_timeout = U32_MAX;
|
|
+ caps->d3_closes_handle = false;
|
|
+ caps->ssh_buffer_size = U32_MAX;
|
|
+
|
|
+ /* Pre-load supported DSM functions. */
|
|
+ status = ssam_dsm_get_functions(handle, &funcs);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /* Load actual values from ACPI, if present. */
|
|
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
|
|
+ &caps->ssh_power_profile);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_dsm_load_u32(handle, funcs,
|
|
+ SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
|
|
+ &caps->screen_on_sleep_idle_timeout);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_dsm_load_u32(handle, funcs,
|
|
+ SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
|
|
+ &caps->screen_off_sleep_idle_timeout);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
|
|
+ &d3_closes_handle);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ caps->d3_closes_handle = !!d3_closes_handle;
|
|
+
|
|
+ status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
|
|
+ &caps->ssh_buffer_size);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_init() - Initialize SSAM controller.
|
|
+ * @ctrl: The controller to initialize.
|
|
+ * @serdev: The serial device representing the underlying data transport.
|
|
+ *
|
|
+ * Initializes the given controller. Does neither start receiver nor
|
|
+ * transmitter threads. After this call, the controller has to be hooked up to
|
|
+ * the serdev core separately via &struct serdev_device_ops, relaying calls to
|
|
+ * ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
|
|
+ * controller has been hooked up, transmitter and receiver threads may be
|
|
+ * started via ssam_controller_start(). These setup steps need to be completed
|
|
+ * before controller can be used for requests.
|
|
+ */
|
|
+int ssam_controller_init(struct ssam_controller *ctrl,
|
|
+ struct serdev_device *serdev)
|
|
+{
|
|
+ acpi_handle handle = ACPI_HANDLE(&serdev->dev);
|
|
+ int status;
|
|
+
|
|
+ init_rwsem(&ctrl->lock);
|
|
+ kref_init(&ctrl->kref);
|
|
+
|
|
+ status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ dev_dbg(&serdev->dev,
|
|
+ "device capabilities:\n"
|
|
+ " ssh_power_profile: %u\n"
|
|
+ " ssh_buffer_size: %u\n"
|
|
+ " screen_on_sleep_idle_timeout: %u\n"
|
|
+ " screen_off_sleep_idle_timeout: %u\n"
|
|
+ " d3_closes_handle: %u\n",
|
|
+ ctrl->caps.ssh_power_profile,
|
|
+ ctrl->caps.ssh_buffer_size,
|
|
+ ctrl->caps.screen_on_sleep_idle_timeout,
|
|
+ ctrl->caps.screen_off_sleep_idle_timeout,
|
|
+ ctrl->caps.d3_closes_handle);
|
|
+
|
|
+ ssh_seq_reset(&ctrl->counter.seq);
|
|
+ ssh_rqid_reset(&ctrl->counter.rqid);
|
|
+
|
|
+ /* Initialize event/request completion system. */
|
|
+ status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /* Initialize request and packet transport layers. */
|
|
+ status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
|
|
+ if (status) {
|
|
+ ssam_cplt_destroy(&ctrl->cplt);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we expect to be in an
|
|
+ * exclusive context, due to smoke-testing in
|
|
+ * ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_start() - Start the receiver and transmitter threads of the
|
|
+ * controller.
|
|
+ * @ctrl: The controller.
|
|
+ *
|
|
+ * Note: When this function is called, the controller should be properly
|
|
+ * hooked up to the serdev core via &struct serdev_device_ops. Please refer
|
|
+ * to ssam_controller_init() for more details on controller initialization.
|
|
+ *
|
|
+ * This function must be called with the main controller lock held (i.e. by
|
|
+ * calling ssam_controller_lock()).
|
|
+ */
|
|
+int ssam_controller_start(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
|
|
+ return -EINVAL;
|
|
+
|
|
+ status = ssh_rtl_start(&ctrl->rtl);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we expect to be locked/in an
|
|
+ * exclusive context, due to smoke-testing in
|
|
+ * ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
|
|
+ * shutdown.
|
|
+ *
|
|
+ * Chosen to be larger than one full request timeout, including packets timing
|
|
+ * out. This value should give ample time to complete any outstanding requests
|
|
+ * during normal operation and account for the odd packet timeout.
|
|
+ */
|
|
+#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT msecs_to_jiffies(5000)
|
|
+
|
|
+/**
|
|
+ * ssam_controller_shutdown() - Shut down the controller.
|
|
+ * @ctrl: The controller.
|
|
+ *
|
|
+ * Shuts down the controller by flushing all pending requests and stopping the
|
|
+ * transmitter and receiver threads. All requests submitted after this call
|
|
+ * will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
|
|
+ * is safe to use in parallel with ongoing request submission.
|
|
+ *
|
|
+ * In the course of this shutdown procedure, all currently registered
|
|
+ * notifiers will be unregistered. It is, however, strongly recommended to not
|
|
+ * rely on this behavior, and instead the party registering the notifier
|
|
+ * should unregister it before the controller gets shut down, e.g. via the
|
|
+ * SSAM bus which guarantees client devices to be removed before a shutdown.
|
|
+ *
|
|
+ * Note that events may still be pending after this call, but, due to the
|
|
+ * notifiers being unregistered, these events will be dropped when the
|
|
+ * controller is subsequently destroyed via ssam_controller_destroy().
|
|
+ *
|
|
+ * This function must be called with the main controller lock held (i.e. by
|
|
+ * calling ssam_controller_lock()).
|
|
+ */
|
|
+void ssam_controller_shutdown(struct ssam_controller *ctrl)
|
|
+{
|
|
+ enum ssam_controller_state s = ctrl->state;
|
|
+ int status;
|
|
+
|
|
+ if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Try to flush pending events and requests while everything still
|
|
+ * works. Note: There may still be packets and/or requests in the
|
|
+ * system after this call (e.g. via control packets submitted by the
|
|
+ * packet transport layer or flush timeout / failure, ...). Those will
|
|
+ * be handled with the ssh_rtl_shutdown() call below.
|
|
+ */
|
|
+ status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
|
|
+ if (status) {
|
|
+ ssam_err(ctrl, "failed to flush request transport layer: %d\n",
|
|
+ status);
|
|
+ }
|
|
+
|
|
+ /* Try to flush all currently completing requests and events. */
|
|
+ ssam_cplt_flush(&ctrl->cplt);
|
|
+
|
|
+ /*
|
|
+ * We expect all notifiers to have been removed by the respective client
|
|
+ * driver that set them up at this point. If this warning occurs, some
|
|
+ * client driver has not done that...
|
|
+ */
|
|
+ WARN_ON(!ssam_notifier_is_empty(ctrl));
|
|
+
|
|
+ /*
|
|
+ * Nevertheless, we should still take care of drivers that don't behave
|
|
+ * well. Thus disable all enabled events, unregister all notifiers.
|
|
+ */
|
|
+ ssam_notifier_unregister_all(ctrl);
|
|
+
|
|
+ /*
|
|
+ * Cancel remaining requests. Ensure no new ones can be queued and stop
|
|
+ * threads.
|
|
+ */
|
|
+ ssh_rtl_shutdown(&ctrl->rtl);
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we expect to be locked/in an
|
|
+ * exclusive context, due to smoke-testing in
|
|
+ * ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
|
|
+ ctrl->rtl.ptl.serdev = NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_destroy() - Destroy the controller and free its resources.
|
|
+ * @ctrl: The controller.
|
|
+ *
|
|
+ * Ensures that all resources associated with the controller get freed. This
|
|
+ * function should only be called after the controller has been stopped via
|
|
+ * ssam_controller_shutdown(). In general, this function should not be called
|
|
+ * directly. The only valid place to call this function directly is during
|
|
+ * initialization, before the controller has been fully initialized and passed
|
|
+ * to other processes. This function is called automatically when the
|
|
+ * reference count of the controller reaches zero.
|
|
+ *
|
|
+ * This function must be called with the main controller lock held (i.e. by
|
|
+ * calling ssam_controller_lock()).
|
|
+ */
|
|
+void ssam_controller_destroy(struct ssam_controller *ctrl)
|
|
+{
|
|
+ if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
|
|
+ return;
|
|
+
|
|
+ WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
|
|
+
|
|
+ /*
|
|
+ * Note: New events could still have been received after the previous
|
|
+ * flush in ssam_controller_shutdown, before the request transport layer
|
|
+ * has been shut down. At this point, after the shutdown, we can be sure
|
|
+ * that no new events will be queued. The call to ssam_cplt_destroy will
|
|
+ * ensure that those remaining are being completed and freed.
|
|
+ */
|
|
+
|
|
+ /* Actually free resources. */
|
|
+ ssam_cplt_destroy(&ctrl->cplt);
|
|
+ ssh_rtl_destroy(&ctrl->rtl);
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we expect to be locked/in an
|
|
+ * exclusive context, due to smoke-testing in
|
|
+ * ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_suspend() - Suspend the controller.
|
|
+ * @ctrl: The controller to suspend.
|
|
+ *
|
|
+ * Marks the controller as suspended. Note that display-off and D0-exit
|
|
+ * notifications have to be sent manually before transitioning the controller
|
|
+ * into the suspended state via this function.
|
|
+ *
|
|
+ * See ssam_controller_resume() for the corresponding resume function.
|
|
+ *
|
|
+ * Return: Returns %-EINVAL if the controller is currently not in the
|
|
+ * "started" state.
|
|
+ */
|
|
+int ssam_controller_suspend(struct ssam_controller *ctrl)
|
|
+{
|
|
+ ssam_controller_lock(ctrl);
|
|
+
|
|
+ if (ctrl->state != SSAM_CONTROLLER_STARTED) {
|
|
+ ssam_controller_unlock(ctrl);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: suspending controller\n");
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we're locked, due to
|
|
+ * smoke-testing in ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
|
|
+
|
|
+ ssam_controller_unlock(ctrl);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_resume() - Resume the controller from suspend.
|
|
+ * @ctrl: The controller to resume.
|
|
+ *
|
|
+ * Resume the controller from the suspended state it was put into via
|
|
+ * ssam_controller_suspend(). This function does not issue display-on and
|
|
+ * D0-entry notifications. If required, those have to be sent manually after
|
|
+ * this call.
|
|
+ *
|
|
+ * Return: Returns %-EINVAL if the controller is currently not suspended.
|
|
+ */
|
|
+int ssam_controller_resume(struct ssam_controller *ctrl)
|
|
+{
|
|
+ ssam_controller_lock(ctrl);
|
|
+
|
|
+ if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
|
|
+ ssam_controller_unlock(ctrl);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: resuming controller\n");
|
|
+
|
|
+ /*
|
|
+ * Set state via write_once even though we're locked, due to
|
|
+ * smoke-testing in ssam_request_sync_submit().
|
|
+ */
|
|
+ WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
|
|
+
|
|
+ ssam_controller_unlock(ctrl);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Top-level request interface ------------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * ssam_request_write_data() - Construct and write SAM request message to
|
|
+ * buffer.
|
|
+ * @buf: The buffer to write the data to.
|
|
+ * @ctrl: The controller via which the request will be sent.
|
|
+ * @spec: The request data and specification.
|
|
+ *
|
|
+ * Constructs a SAM/SSH request message and writes it to the provided buffer.
|
|
+ * The request and transport counters, specifically RQID and SEQ, will be set
|
|
+ * in this call. These counters are obtained from the controller. It is thus
|
|
+ * only valid to send the resulting message via the controller specified here.
|
|
+ *
|
|
+ * For calculation of the required buffer size, refer to the
|
|
+ * SSH_COMMAND_MESSAGE_LENGTH() macro.
|
|
+ *
|
|
+ * Return: Returns the number of bytes used in the buffer on success. Returns
|
|
+ * %-EINVAL if the payload length provided in the request specification is too
|
|
+ * large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
|
|
+ * is too small.
|
|
+ */
|
|
+ssize_t ssam_request_write_data(struct ssam_span *buf,
|
|
+ struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec)
|
|
+{
|
|
+ struct msgbuf msgb;
|
|
+ u16 rqid;
|
|
+ u8 seq;
|
|
+
|
|
+ if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
|
|
+ return -EINVAL;
|
|
+
|
|
+ msgb_init(&msgb, buf->ptr, buf->len);
|
|
+ seq = ssh_seq_next(&ctrl->counter.seq);
|
|
+ rqid = ssh_rqid_next(&ctrl->counter.rqid);
|
|
+ msgb_push_cmd(&msgb, seq, rqid, spec);
|
|
+
|
|
+ return msgb_bytes_used(&msgb);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_write_data);
|
|
+
|
|
+static void ssam_request_sync_complete(struct ssh_request *rqst,
|
|
+ const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data, int status)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+ struct ssam_request_sync *r;
|
|
+
|
|
+ r = container_of(rqst, struct ssam_request_sync, base);
|
|
+ r->status = status;
|
|
+
|
|
+ if (r->resp)
|
|
+ r->resp->length = 0;
|
|
+
|
|
+ if (status) {
|
|
+ rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!data) /* Handle requests without a response. */
|
|
+ return;
|
|
+
|
|
+ if (!r->resp || !r->resp->pointer) {
|
|
+ if (data->len)
|
|
+ rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (data->len > r->resp->capacity) {
|
|
+ rtl_err(rtl,
|
|
+ "rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n",
|
|
+ r->resp->capacity, data->len);
|
|
+ r->status = -ENOSPC;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ r->resp->length = data->len;
|
|
+ memcpy(r->resp->pointer, data->ptr, data->len);
|
|
+}
|
|
+
|
|
+static void ssam_request_sync_release(struct ssh_request *rqst)
|
|
+{
|
|
+ complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
|
|
+}
|
|
+
|
|
+static const struct ssh_request_ops ssam_request_sync_ops = {
|
|
+ .release = ssam_request_sync_release,
|
|
+ .complete = ssam_request_sync_complete,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_alloc() - Allocate a synchronous request.
|
|
+ * @payload_len: The length of the request payload.
|
|
+ * @flags: Flags used for allocation.
|
|
+ * @rqst: Where to store the pointer to the allocated request.
|
|
+ * @buffer: Where to store the buffer descriptor for the message buffer of
|
|
+ * the request.
|
|
+ *
|
|
+ * Allocates a synchronous request with corresponding message buffer. The
|
|
+ * request still needs to be initialized via ssam_request_sync_init() before
|
|
+ * it can be submitted, and the message buffer data must still be set to the
|
|
+ * returned buffer via ssam_request_sync_set_data() after it has been filled,
|
|
+ * if need be with adjusted message length.
|
|
+ *
|
|
+ * After use, the request and its corresponding message buffer should be freed
|
|
+ * via ssam_request_sync_free(). The buffer must not be freed separately.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOMEM if the request could not be
|
|
+ * allocated.
|
|
+ */
|
|
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
|
|
+ struct ssam_request_sync **rqst,
|
|
+ struct ssam_span *buffer)
|
|
+{
|
|
+ size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
|
|
+
|
|
+ *rqst = kzalloc(sizeof(**rqst) + msglen, flags);
|
|
+ if (!*rqst)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ buffer->ptr = (u8 *)(*rqst + 1);
|
|
+ buffer->len = msglen;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_free() - Free a synchronous request.
|
|
+ * @rqst: The request to be freed.
|
|
+ *
|
|
+ * Free a synchronous request and its corresponding buffer allocated with
|
|
+ * ssam_request_sync_alloc(). Do not use for requests allocated on the stack
|
|
+ * or via any other function.
|
|
+ *
|
|
+ * Warning: The caller must ensure that the request is not in use any more.
|
|
+ * I.e. the caller must ensure that it has the only reference to the request
|
|
+ * and the request is not currently pending. This means that the caller has
|
|
+ * either never submitted the request, request submission has failed, or the
|
|
+ * caller has waited until the submitted request has been completed via
|
|
+ * ssam_request_sync_wait().
|
|
+ */
|
|
+void ssam_request_sync_free(struct ssam_request_sync *rqst)
|
|
+{
|
|
+ kfree(rqst);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync_free);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_init() - Initialize a synchronous request struct.
|
|
+ * @rqst: The request to initialize.
|
|
+ * @flags: The request flags.
|
|
+ *
|
|
+ * Initializes the given request struct. Does not initialize the request
|
|
+ * message data. This has to be done explicitly after this call via
|
|
+ * ssam_request_sync_set_data() and the actual message data has to be written
|
|
+ * via ssam_request_write_data().
|
|
+ *
|
|
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
|
|
+ */
|
|
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
|
|
+ enum ssam_request_flags flags)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ init_completion(&rqst->comp);
|
|
+ rqst->resp = NULL;
|
|
+ rqst->status = 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync_init);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_submit() - Submit a synchronous request.
|
|
+ * @ctrl: The controller with which to submit the request.
|
|
+ * @rqst: The request to submit.
|
|
+ *
|
|
+ * Submit a synchronous request. The request has to be initialized and
|
|
+ * properly set up, including response buffer (may be %NULL if no response is
|
|
+ * expected) and command message data. This function does not wait for the
|
|
+ * request to be completed.
|
|
+ *
|
|
+ * If this function succeeds, ssam_request_sync_wait() must be used to ensure
|
|
+ * that the request has been completed before the response data can be
|
|
+ * accessed and/or the request can be freed. On failure, the request may
|
|
+ * immediately be freed.
|
|
+ *
|
|
+ * This function may only be used if the controller is active, i.e. has been
|
|
+ * initialized and not suspended.
|
|
+ */
|
|
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
|
|
+ struct ssam_request_sync *rqst)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * This is only a superficial check. In general, the caller needs to
|
|
+ * ensure that the controller is initialized and is not (and does not
|
|
+ * get) suspended during use, i.e. until the request has been completed
|
|
+ * (if _absolutely_ necessary, by use of ssam_controller_statelock/
|
|
+ * ssam_controller_stateunlock, but something like ssam_client_link
|
|
+ * should be preferred as this needs to last until the request has been
|
|
+ * completed).
|
|
+ *
|
|
+ * Note that it is actually safe to use this function while the
|
|
+ * controller is in the process of being shut down (as ssh_rtl_submit
|
|
+ * is safe with regards to this), but it is generally discouraged to do
|
|
+ * so.
|
|
+ */
|
|
+ if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
|
|
+ ssh_request_put(&rqst->base);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
|
|
+ ssh_request_put(&rqst->base);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync() - Execute a synchronous request.
|
|
+ * @ctrl: The controller via which the request will be submitted.
|
|
+ * @spec: The request specification and payload.
|
|
+ * @rsp: The response buffer.
|
|
+ *
|
|
+ * Allocates a synchronous request with its message data buffer on the heap
|
|
+ * via ssam_request_sync_alloc(), fully initializes it via the provided
|
|
+ * request specification, submits it, and finally waits for its completion
|
|
+ * before freeing it and returning its status.
|
|
+ *
|
|
+ * Return: Returns the status of the request or any failure during setup.
|
|
+ */
|
|
+int ssam_request_sync(struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec,
|
|
+ struct ssam_response *rsp)
|
|
+{
|
|
+ struct ssam_request_sync *rqst;
|
|
+ struct ssam_span buf;
|
|
+ ssize_t len;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_request_sync_init(rqst, spec->flags);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ssam_request_sync_set_resp(rqst, rsp);
|
|
+
|
|
+ len = ssam_request_write_data(&buf, ctrl, spec);
|
|
+ if (len < 0) {
|
|
+ ssam_request_sync_free(rqst);
|
|
+ return len;
|
|
+ }
|
|
+
|
|
+ ssam_request_sync_set_data(rqst, buf.ptr, len);
|
|
+
|
|
+ status = ssam_request_sync_submit(ctrl, rqst);
|
|
+ if (!status)
|
|
+ status = ssam_request_sync_wait(rqst);
|
|
+
|
|
+ ssam_request_sync_free(rqst);
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_with_buffer() - Execute a synchronous request with the
|
|
+ * provided buffer as back-end for the message buffer.
|
|
+ * @ctrl: The controller via which the request will be submitted.
|
|
+ * @spec: The request specification and payload.
|
|
+ * @rsp: The response buffer.
|
|
+ * @buf: The buffer for the request message data.
|
|
+ *
|
|
+ * Allocates a synchronous request struct on the stack, fully initializes it
|
|
+ * using the provided buffer as message data buffer, submits it, and then
|
|
+ * waits for its completion before returning its status. The
|
|
+ * SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
|
|
+ * message buffer size.
|
|
+ *
|
|
+ * This function does essentially the same as ssam_request_sync(), but instead
|
|
+ * of dynamically allocating the request and message data buffer, it uses the
|
|
+ * provided message data buffer and stores the (small) request struct on the
|
|
+ * heap.
|
|
+ *
|
|
+ * Return: Returns the status of the request or any failure during setup.
|
|
+ */
|
|
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec,
|
|
+ struct ssam_response *rsp,
|
|
+ struct ssam_span *buf)
|
|
+{
|
|
+ struct ssam_request_sync rqst;
|
|
+ ssize_t len;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_request_sync_init(&rqst, spec->flags);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ssam_request_sync_set_resp(&rqst, rsp);
|
|
+
|
|
+ len = ssam_request_write_data(buf, ctrl, spec);
|
|
+ if (len < 0)
|
|
+ return len;
|
|
+
|
|
+ ssam_request_sync_set_data(&rqst, buf->ptr, len);
|
|
+
|
|
+ status = ssam_request_sync_submit(ctrl, &rqst);
|
|
+ if (!status)
|
|
+ status = ssam_request_sync_wait(&rqst);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
|
|
+
|
|
+
|
|
+/* -- Internal SAM requests. ------------------------------------------------ */
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_SAM,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x13,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
|
|
+ .target_category = SSAM_SSH_TC_SAM,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x15,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
|
|
+ .target_category = SSAM_SSH_TC_SAM,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x16,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
|
|
+ .target_category = SSAM_SSH_TC_SAM,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x33,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
|
|
+ .target_category = SSAM_SSH_TC_SAM,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x34,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+/**
|
|
+ * struct ssh_notification_params - Command payload to enable/disable SSH
|
|
+ * notifications.
|
|
+ * @target_category: The target category for which notifications should be
|
|
+ * enabled/disabled.
|
|
+ * @flags: Flags determining how notifications are being sent.
|
|
+ * @request_id: The request ID that is used to send these notifications.
|
|
+ * @instance_id: The specific instance in the given target category for
|
|
+ * which notifications should be enabled.
|
|
+ */
|
|
+struct ssh_notification_params {
|
|
+ u8 target_category;
|
|
+ u8 flags;
|
|
+ __le16 request_id;
|
|
+ u8 instance_id;
|
|
+} __packed;
|
|
+
|
|
+static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg, u8 cid,
|
|
+ struct ssam_event_id id, u8 flags)
|
|
+{
|
|
+ struct ssh_notification_params params;
|
|
+ struct ssam_request rqst;
|
|
+ struct ssam_response result;
|
|
+ int status;
|
|
+
|
|
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
|
|
+ u8 buf = 0;
|
|
+
|
|
+ /* Only allow RQIDs that lie within the event spectrum. */
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ params.target_category = id.target_category;
|
|
+ params.instance_id = id.instance;
|
|
+ params.flags = flags;
|
|
+ put_unaligned_le16(rqid, ¶ms.request_id);
|
|
+
|
|
+ rqst.target_category = reg.target_category;
|
|
+ rqst.target_id = reg.target_id;
|
|
+ rqst.command_id = cid;
|
|
+ rqst.instance_id = 0x00;
|
|
+ rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
|
|
+ rqst.length = sizeof(params);
|
|
+ rqst.payload = (u8 *)¶ms;
|
|
+
|
|
+ result.capacity = sizeof(buf);
|
|
+ result.length = 0;
|
|
+ result.pointer = &buf;
|
|
+
|
|
+ status = ssam_retry(ssam_request_sync_onstack, ctrl, &rqst, &result,
|
|
+ sizeof(params));
|
|
+
|
|
+ return status < 0 ? status : buf;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ssh_event_enable() - Enable SSH event.
|
|
+ * @ctrl: The controller for which to enable the event.
|
|
+ * @reg: The event registry describing what request to use for enabling and
|
|
+ * disabling the event.
|
|
+ * @id: The event identifier.
|
|
+ * @flags: The event flags.
|
|
+ *
|
|
+ * Enables the specified event on the EC. This function does not manage
|
|
+ * reference counting of enabled events and is basically only a wrapper for
|
|
+ * the raw EC request. If the specified event is already enabled, the EC will
|
|
+ * ignore this request.
|
|
+ *
|
|
+ * Return: Returns the status of the executed SAM request (zero on success and
|
|
+ * negative on direct failure) or %-EPROTO if the request response indicates a
|
|
+ * failure.
|
|
+ */
|
|
+static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
|
|
+
|
|
+ if (status < 0 && status != -EINVAL) {
|
|
+ ssam_err(ctrl,
|
|
+ "failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
|
|
+ id.target_category, id.instance, reg.target_category);
|
|
+ }
|
|
+
|
|
+ if (status > 0) {
|
|
+ ssam_err(ctrl,
|
|
+ "unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
|
|
+ status, id.target_category, id.instance, reg.target_category);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ssh_event_disable() - Disable SSH event.
|
|
+ * @ctrl: The controller for which to disable the event.
|
|
+ * @reg: The event registry describing what request to use for enabling and
|
|
+ * disabling the event (must be same as used when enabling the event).
|
|
+ * @id: The event identifier.
|
|
+ * @flags: The event flags (likely ignored for disabling of events).
|
|
+ *
|
|
+ * Disables the specified event on the EC. This function does not manage
|
|
+ * reference counting of enabled events and is basically only a wrapper for
|
|
+ * the raw EC request. If the specified event is already disabled, the EC will
|
|
+ * ignore this request.
|
|
+ *
|
|
+ * Return: Returns the status of the executed SAM request (zero on success and
|
|
+ * negative on direct failure) or %-EPROTO if the request response indicates a
|
|
+ * failure.
|
|
+ */
|
|
+static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
|
|
+
|
|
+ if (status < 0 && status != -EINVAL) {
|
|
+ ssam_err(ctrl,
|
|
+ "failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
|
|
+ id.target_category, id.instance, reg.target_category);
|
|
+ }
|
|
+
|
|
+ if (status > 0) {
|
|
+ ssam_err(ctrl,
|
|
+ "unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
|
|
+ status, id.target_category, id.instance, reg.target_category);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Wrappers for internal SAM requests. ----------------------------------- */
|
|
+
|
|
+/**
|
|
+ * ssam_get_firmware_version() - Get the SAM/EC firmware version.
|
|
+ * @ctrl: The controller.
|
|
+ * @version: Where to store the version number.
|
|
+ *
|
|
+ * Return: Returns zero on success or the status of the executed SAM request
|
|
+ * if that request failed.
|
|
+ */
|
|
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
|
|
+{
|
|
+ __le32 __version;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ *version = le32_to_cpu(__version);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
|
|
+ * off.
|
|
+ * @ctrl: The controller.
|
|
+ *
|
|
+ * Notify the EC that the display has been turned off and the driver may enter
|
|
+ * a lower-power state. This will prevent events from being sent directly.
|
|
+ * Rather, the EC signals an event by pulling the wakeup GPIO high for as long
|
|
+ * as there are pending events. The events then need to be manually released,
|
|
+ * one by one, via the GPIO callback request. All pending events accumulated
|
|
+ * during this state can also be released by issuing the display-on
|
|
+ * notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
|
|
+ * the GPIO.
|
|
+ *
|
|
+ * On some devices, specifically ones with an integrated keyboard, the keyboard
|
|
+ * backlight will be turned off by this call.
|
|
+ *
|
|
+ * This function will only send the display-off notification command if
|
|
+ * display notifications are supported by the EC. Currently all known devices
|
|
+ * support these notifications.
|
|
+ *
|
|
+ * Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
|
|
+ *
|
|
+ * Return: Returns zero on success or if no request has been executed, the
|
|
+ * status of the executed SAM request if that request failed, or %-EPROTO if
|
|
+ * an unexpected response has been received.
|
|
+ */
|
|
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+ u8 response;
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: notifying display off\n");
|
|
+
|
|
+ status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (response != 0) {
|
|
+ ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n",
|
|
+ response);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
|
|
+ * @ctrl: The controller.
|
|
+ *
|
|
+ * Notify the EC that the display has been turned back on and the driver has
|
|
+ * exited its lower-power state. This notification is the counterpart to the
|
|
+ * display-off notification sent via ssam_ctrl_notif_display_off() and will
|
|
+ * reverse its effects, including resetting events to their default behavior.
|
|
+ *
|
|
+ * This function will only send the display-on notification command if display
|
|
+ * notifications are supported by the EC. Currently all known devices support
|
|
+ * these notifications.
|
|
+ *
|
|
+ * See ssam_ctrl_notif_display_off() for more details.
|
|
+ *
|
|
+ * Return: Returns zero on success or if no request has been executed, the
|
|
+ * status of the executed SAM request if that request failed, or %-EPROTO if
|
|
+ * an unexpected response has been received.
|
|
+ */
|
|
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+ u8 response;
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: notifying display on\n");
|
|
+
|
|
+ status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (response != 0) {
|
|
+ ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n",
|
|
+ response);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
|
|
+ * power state.
|
|
+ * @ctrl: The controller
|
|
+ *
|
|
+ * Notifies the EC that the driver prepares to exit the D0 power state in
|
|
+ * favor of a lower-power state. Exact effects of this function related to the
|
|
+ * EC are currently unknown.
|
|
+ *
|
|
+ * This function will only send the D0-exit notification command if D0-state
|
|
+ * notifications are supported by the EC. Only newer Surface generations
|
|
+ * support these notifications.
|
|
+ *
|
|
+ * Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
|
|
+ *
|
|
+ * Return: Returns zero on success or if no request has been executed, the
|
|
+ * status of the executed SAM request if that request failed, or %-EPROTO if
|
|
+ * an unexpected response has been received.
|
|
+ */
|
|
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+ u8 response;
|
|
+
|
|
+ if (!ctrl->caps.d3_closes_handle)
|
|
+ return 0;
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: notifying D0 exit\n");
|
|
+
|
|
+ status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (response != 0) {
|
|
+ ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n",
|
|
+ response);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
|
|
+ * power state.
|
|
+ * @ctrl: The controller
|
|
+ *
|
|
+ * Notifies the EC that the driver has exited a lower-power state and entered
|
|
+ * the D0 power state. Exact effects of this function related to the EC are
|
|
+ * currently unknown.
|
|
+ *
|
|
+ * This function will only send the D0-entry notification command if D0-state
|
|
+ * notifications are supported by the EC. Only newer Surface generations
|
|
+ * support these notifications.
|
|
+ *
|
|
+ * See ssam_ctrl_notif_d0_exit() for more details.
|
|
+ *
|
|
+ * Return: Returns zero on success or if no request has been executed, the
|
|
+ * status of the executed SAM request if that request failed, or %-EPROTO if
|
|
+ * an unexpected response has been received.
|
|
+ */
|
|
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+ u8 response;
|
|
+
|
|
+ if (!ctrl->caps.d3_closes_handle)
|
|
+ return 0;
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: notifying D0 entry\n");
|
|
+
|
|
+ status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (response != 0) {
|
|
+ ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n",
|
|
+ response);
|
|
+ return -EPROTO;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Top-level event registry interface. ----------------------------------- */
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_enable() - Enable event for reference count entry if it has
|
|
+ * not already been enabled.
|
|
+ * @ctrl: The controller to enable the event on.
|
|
+ * @entry: The reference count entry for the event to be enabled.
|
|
+ * @flags: The flags used for enabling the event on the EC.
|
|
+ *
|
|
+ * Enable the event associated with the given reference count entry if the
|
|
+ * reference count equals one, i.e. the event has not previously been enabled.
|
|
+ * If the event has already been enabled (i.e. reference count not equal to
|
|
+ * one), check that the flags used for enabling match and warn about this if
|
|
+ * they do not.
|
|
+ *
|
|
+ * This does not modify the reference count itself, which is done with
|
|
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
|
|
+ *
|
|
+ * Note: ``nf->lock`` must be held when calling this function.
|
|
+ *
|
|
+ * Return: Returns zero on success. If the event is enabled by this call,
|
|
+ * returns the status of the event-enable EC command.
|
|
+ */
|
|
+static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
|
|
+ struct ssam_nf_refcount_entry *entry, u8 flags)
|
|
+{
|
|
+ const struct ssam_event_registry reg = entry->key.reg;
|
|
+ const struct ssam_event_id id = entry->key.id;
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&nf->lock);
|
|
+
|
|
+ ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
|
|
+ reg.target_category, id.target_category, id.instance, entry->refcount);
|
|
+
|
|
+ if (entry->refcount == 1) {
|
|
+ status = ssam_ssh_event_enable(ctrl, reg, id, flags);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ entry->flags = flags;
|
|
+
|
|
+ } else if (entry->flags != flags) {
|
|
+ ssam_warn(ctrl,
|
|
+ "inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
|
|
+ flags, entry->flags, reg.target_category, id.target_category,
|
|
+ id.instance);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_nf_refcount_disable_free() - Disable event for reference count entry if it is
|
|
+ * no longer in use and free the corresponding entry.
|
|
+ * @ctrl: The controller to disable the event on.
|
|
+ * @entry: The reference count entry for the event to be disabled.
|
|
+ * @flags: The flags used for enabling the event on the EC.
|
|
+ *
|
|
+ * If the reference count equals zero, i.e. the event is no longer requested by
|
|
+ * any client, the event will be disabled and the corresponding reference count
|
|
+ * entry freed. The reference count entry must not be used any more after a
|
|
+ * call to this function.
|
|
+ *
|
|
+ * Also checks if the flags used for disabling the event match the flags used
|
|
+ * for enabling the event and warns if they do not (regardless of reference
|
|
+ * count).
|
|
+ *
|
|
+ * This does not modify the reference count itself, which is done with
|
|
+ * ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
|
|
+ *
|
|
+ * Note: ``nf->lock`` must be held when calling this function.
|
|
+ *
|
|
+ * Return: Returns zero on success. If the event is disabled by this call,
|
|
+ * returns the status of the event-enable EC command.
|
|
+ */
|
|
+static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
|
|
+ struct ssam_nf_refcount_entry *entry, u8 flags)
|
|
+{
|
|
+ const struct ssam_event_registry reg = entry->key.reg;
|
|
+ const struct ssam_event_id id = entry->key.id;
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ int status = 0;
|
|
+
|
|
+ lockdep_assert_held(&nf->lock);
|
|
+
|
|
+ ssam_dbg(ctrl, "disabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
|
|
+ reg.target_category, id.target_category, id.instance, entry->refcount);
|
|
+
|
|
+ if (entry->flags != flags) {
|
|
+ ssam_warn(ctrl,
|
|
+ "inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
|
|
+ flags, entry->flags, reg.target_category, id.target_category,
|
|
+ id.instance);
|
|
+ }
|
|
+
|
|
+ if (entry->refcount == 0) {
|
|
+ status = ssam_ssh_event_disable(ctrl, reg, id, flags);
|
|
+ kfree(entry);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_register() - Register an event notifier.
|
|
+ * @ctrl: The controller to register the notifier on.
|
|
+ * @n: The event notifier to register.
|
|
+ *
|
|
+ * Register an event notifier. Increment the usage counter of the associated
|
|
+ * SAM event if the notifier is not marked as an observer. If the event is not
|
|
+ * marked as an observer and is currently not enabled, it will be enabled
|
|
+ * during this call. If the notifier is marked as an observer, no attempt will
|
|
+ * be made at enabling any event and no reference count will be modified.
|
|
+ *
|
|
+ * Notifiers marked as observers do not need to be associated with one specific
|
|
+ * event, i.e. as long as no event matching is performed, only the event target
|
|
+ * category needs to be set.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOSPC if there have already been
|
|
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
|
|
+ * registered, %-ENOMEM if the corresponding event entry could not be
|
|
+ * allocated. If this is the first time that a notifier block is registered
|
|
+ * for the specific associated event, returns the status of the event-enable
|
|
+ * EC-command.
|
|
+ */
|
|
+int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
|
|
+{
|
|
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
|
|
+ struct ssam_nf_refcount_entry *entry = NULL;
|
|
+ struct ssam_nf_head *nf_head;
|
|
+ struct ssam_nf *nf;
|
|
+ int status;
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ nf = &ctrl->cplt.event.notif;
|
|
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+
|
|
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
|
|
+ entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
|
|
+ if (IS_ERR(entry)) {
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return PTR_ERR(entry);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ status = ssam_nfblk_insert(nf_head, &n->base);
|
|
+ if (status) {
|
|
+ if (entry)
|
|
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
|
|
+
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ if (entry) {
|
|
+ status = ssam_nf_refcount_enable(ctrl, entry, n->event.flags);
|
|
+ if (status) {
|
|
+ ssam_nfblk_remove(&n->base);
|
|
+ ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
|
|
+ mutex_unlock(&nf->lock);
|
|
+ synchronize_srcu(&nf_head->srcu);
|
|
+ return status;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_notifier_register);
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_unregister() - Unregister an event notifier.
|
|
+ * @ctrl: The controller the notifier has been registered on.
|
|
+ * @n: The event notifier to unregister.
|
|
+ *
|
|
+ * Unregister an event notifier. Decrement the usage counter of the associated
|
|
+ * SAM event if the notifier is not marked as an observer. If the usage counter
|
|
+ * reaches zero, the event will be disabled.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
|
|
+ * not been registered on the controller. If the given notifier block was the
|
|
+ * last one associated with its specific event, returns the status of the
|
|
+ * event-disable EC-command.
|
|
+ */
|
|
+int ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
|
|
+{
|
|
+ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+ struct ssam_nf_head *nf_head;
|
|
+ struct ssam_nf *nf;
|
|
+ int status = 0;
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ nf = &ctrl->cplt.event.notif;
|
|
+ nf_head = &nf->head[ssh_rqid_to_event(rqid)];
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+
|
|
+ if (!ssam_nfblk_find(nf_head, &n->base)) {
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If this is an observer notifier, do not attempt to disable the
|
|
+ * event, just remove it.
|
|
+ */
|
|
+ if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
|
|
+ entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
|
|
+ if (WARN_ON(!entry)) {
|
|
+ /*
|
|
+ * If this does not return an entry, there's a logic
|
|
+ * error somewhere: The notifier block is registered,
|
|
+ * but the event refcount entry is not there. Remove
|
|
+ * the notifier block anyways.
|
|
+ */
|
|
+ status = -ENOENT;
|
|
+ goto remove;
|
|
+ }
|
|
+
|
|
+ status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags);
|
|
+ }
|
|
+
|
|
+remove:
|
|
+ ssam_nfblk_remove(&n->base);
|
|
+ mutex_unlock(&nf->lock);
|
|
+ synchronize_srcu(&nf_head->srcu);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_notifier_unregister);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_event_enable() - Enable the specified event.
|
|
+ * @ctrl: The controller to enable the event for.
|
|
+ * @reg: The event registry to use for enabling the event.
|
|
+ * @id: The event ID specifying the event to be enabled.
|
|
+ * @flags: The SAM event flags used for enabling the event.
|
|
+ *
|
|
+ * Increment the event reference count of the specified event. If the event has
|
|
+ * not been enabled previously, it will be enabled by this call.
|
|
+ *
|
|
+ * Note: In general, ssam_notifier_register() with a non-observer notifier
|
|
+ * should be preferred for enabling/disabling events, as this will guarantee
|
|
+ * proper ordering and event forwarding in case of errors during event
|
|
+ * enabling/disabling.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOSPC if the reference count for the
|
|
+ * specified event has reached its maximum, %-ENOMEM if the corresponding event
|
|
+ * entry could not be allocated. If this is the first time that this event has
|
|
+ * been enabled (i.e. the reference count was incremented from zero to one by
|
|
+ * this call), returns the status of the event-enable EC-command.
|
|
+ */
|
|
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags)
|
|
+{
|
|
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+ int status;
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+
|
|
+ entry = ssam_nf_refcount_inc(nf, reg, id);
|
|
+ if (IS_ERR(entry)) {
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return PTR_ERR(entry);
|
|
+ }
|
|
+
|
|
+ status = ssam_nf_refcount_enable(ctrl, entry, flags);
|
|
+ if (status) {
|
|
+ ssam_nf_refcount_dec_free(nf, reg, id);
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_event_enable);
|
|
+
|
|
+/**
|
|
+ * ssam_controller_event_disable() - Disable the specified event.
|
|
+ * @ctrl: The controller to disable the event for.
|
|
+ * @reg: The event registry to use for disabling the event.
|
|
+ * @id: The event ID specifying the event to be disabled.
|
|
+ * @flags: The flags used when enabling the event.
|
|
+ *
|
|
+ * Decrement the reference count of the specified event. If the reference count
|
|
+ * reaches zero, the event will be disabled.
|
|
+ *
|
|
+ * Note: In general, ssam_notifier_register()/ssam_notifier_unregister() with a
|
|
+ * non-observer notifier should be preferred for enabling/disabling events, as
|
|
+ * this will guarantee proper ordering and event forwarding in case of errors
|
|
+ * during event enabling/disabling.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOENT if the given event has not been
|
|
+ * enabled on the controller. If the reference count of the event reaches zero
|
|
+ * during this call, returns the status of the event-disable EC-command.
|
|
+ */
|
|
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags)
|
|
+{
|
|
+ u16 rqid = ssh_tc_to_rqid(id.target_category);
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ struct ssam_nf_refcount_entry *entry;
|
|
+ int status;
|
|
+
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+
|
|
+ entry = ssam_nf_refcount_dec(nf, reg, id);
|
|
+ if (!entry) {
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ status = ssam_nf_refcount_disable_free(ctrl, entry, flags);
|
|
+
|
|
+ mutex_unlock(&nf->lock);
|
|
+ return status;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_controller_event_disable);
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_disable_registered() - Disable events for all registered
|
|
+ * notifiers.
|
|
+ * @ctrl: The controller for which to disable the notifiers/events.
|
|
+ *
|
|
+ * Disables events for all currently registered notifiers. In case of an error
|
|
+ * (EC command failing), all previously disabled events will be restored and
|
|
+ * the error code returned.
|
|
+ *
|
|
+ * This function is intended to disable all events prior to hibernation entry.
|
|
+ * See ssam_notifier_restore_registered() to restore/re-enable all events
|
|
+ * disabled with this function.
|
|
+ *
|
|
+ * Note that this function will not disable events for notifiers registered
|
|
+ * after calling this function. It should thus be made sure that no new
|
|
+ * notifiers are going to be added after this call and before the corresponding
|
|
+ * call to ssam_notifier_restore_registered().
|
|
+ *
|
|
+ * Return: Returns zero on success. In case of failure returns the error code
|
|
+ * returned by the failed EC command to disable an event.
|
|
+ */
|
|
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ struct rb_node *n;
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
|
|
+ struct ssam_nf_refcount_entry *e;
|
|
+
|
|
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
|
|
+ status = ssam_ssh_event_disable(ctrl, e->key.reg,
|
|
+ e->key.id, e->flags);
|
|
+ if (status)
|
|
+ goto err;
|
|
+ }
|
|
+ mutex_unlock(&nf->lock);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err:
|
|
+ for (n = rb_prev(n); n; n = rb_prev(n)) {
|
|
+ struct ssam_nf_refcount_entry *e;
|
|
+
|
|
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
|
|
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
|
|
+ }
|
|
+ mutex_unlock(&nf->lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_restore_registered() - Restore/re-enable events for all
|
|
+ * registered notifiers.
|
|
+ * @ctrl: The controller for which to restore the notifiers/events.
|
|
+ *
|
|
+ * Restores/re-enables all events for which notifiers have been registered on
|
|
+ * the given controller. In case of a failure, the error is logged and the
|
|
+ * function continues to try and enable the remaining events.
|
|
+ *
|
|
+ * This function is intended to restore/re-enable all registered events after
|
|
+ * hibernation. See ssam_notifier_disable_registered() for the counter part
|
|
+ * disabling the events and more details.
|
|
+ */
|
|
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ struct rb_node *n;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+ for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
|
|
+ struct ssam_nf_refcount_entry *e;
|
|
+
|
|
+ e = rb_entry(n, struct ssam_nf_refcount_entry, node);
|
|
+
|
|
+ /* Ignore errors, will get logged in call. */
|
|
+ ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
|
|
+ }
|
|
+ mutex_unlock(&nf->lock);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_is_empty() - Check if there are any registered notifiers.
|
|
+ * @ctrl: The controller to check on.
|
|
+ *
|
|
+ * Return: Returns %true if there are currently no notifiers registered on the
|
|
+ * controller, %false otherwise.
|
|
+ */
|
|
+static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ bool result;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+ result = ssam_nf_refcount_empty(nf);
|
|
+ mutex_unlock(&nf->lock);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_unregister_all() - Unregister all currently registered
|
|
+ * notifiers.
|
|
+ * @ctrl: The controller to unregister the notifiers on.
|
|
+ *
|
|
+ * Unregisters all currently registered notifiers. This function is used to
|
|
+ * ensure that all notifiers will be unregistered and associated
|
|
+ * entries/resources freed when the controller is being shut down.
|
|
+ */
|
|
+static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct ssam_nf *nf = &ctrl->cplt.event.notif;
|
|
+ struct ssam_nf_refcount_entry *e, *n;
|
|
+
|
|
+ mutex_lock(&nf->lock);
|
|
+ rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
|
|
+ /* Ignore errors, will get logged in call. */
|
|
+ ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
|
|
+ kfree(e);
|
|
+ }
|
|
+ nf->refcount = RB_ROOT;
|
|
+ mutex_unlock(&nf->lock);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Wakeup IRQ. ----------------------------------------------------------- */
|
|
+
|
|
+static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
|
|
+{
|
|
+ struct ssam_controller *ctrl = dev_id;
|
|
+
|
|
+ ssam_dbg(ctrl, "pm: wake irq triggered\n");
|
|
+
|
|
+ /*
|
|
+ * Note: Proper wakeup detection is currently unimplemented.
|
|
+ * When the EC is in display-off or any other non-D0 state, it
|
|
+ * does not send events/notifications to the host. Instead it
|
|
+ * signals that there are events available via the wakeup IRQ.
|
|
+ * This driver is responsible for calling back to the EC to
|
|
+ * release these events one-by-one.
|
|
+ *
|
|
+ * This IRQ should not cause a full system resume by its own.
|
|
+ * Instead, events should be handled by their respective subsystem
|
|
+ * drivers, which in turn should signal whether a full system
|
|
+ * resume should be performed.
|
|
+ *
|
|
+ * TODO: Send GPIO callback command repeatedly to EC until callback
|
|
+ * returns 0x00. Return flag of callback is "has more events".
|
|
+ * Each time the command is sent, one event is "released". Once
|
|
+ * all events have been released (return = 0x00), the GPIO is
|
|
+ * re-armed. Detect wakeup events during this process, go back to
|
|
+ * sleep if no wakeup event has been received.
|
|
+ */
|
|
+
|
|
+ return IRQ_HANDLED;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
|
|
+ * @ctrl: The controller for which the IRQ should be set up.
|
|
+ *
|
|
+ * Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
|
|
+ * to wake the device from a low power state.
|
|
+ *
|
|
+ * Note that this IRQ can only be triggered while the EC is in the display-off
|
|
+ * state. In this state, events are not sent to the host in the usual way.
|
|
+ * Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
|
|
+ * events and these events need to be released one-by-one via the GPIO
|
|
+ * callback request, either until there are no events left and the GPIO is
|
|
+ * reset, or all at once by transitioning the EC out of the display-off state,
|
|
+ * which will also clear the GPIO.
|
|
+ *
|
|
+ * Not all events, however, should trigger a full system wakeup. Instead the
|
|
+ * driver should, if necessary, inspect and forward each event to the
|
|
+ * corresponding subsystem, which in turn should decide if the system needs to
|
|
+ * be woken up. This logic has not been implemented yet, thus wakeup by this
|
|
+ * IRQ should be disabled by default to avoid spurious wake-ups, caused, for
|
|
+ * example, by the remaining battery percentage changing. Refer to comments in
|
|
+ * this function and comments in the corresponding IRQ handler for more
|
|
+ * details on how this should be implemented.
|
|
+ *
|
|
+ * See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_off()
|
|
+ * for functions to transition the EC into and out of the display-off state as
|
|
+ * well as more details on it.
|
|
+ *
|
|
+ * The IRQ is disabled by default and has to be enabled before it can wake up
|
|
+ * the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
|
|
+ * should be freed via ssam_irq_free().
|
|
+ */
|
|
+int ssam_irq_setup(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct device *dev = ssam_controller_device(ctrl);
|
|
+ struct gpio_desc *gpiod;
|
|
+ int irq;
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
|
|
+ * However, the GPIO line only gets reset by sending the GPIO callback
|
|
+ * command to SAM (or alternatively the display-on notification). As
|
|
+ * proper handling for this interrupt is not implemented yet, leaving
|
|
+ * the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
|
|
+ * never gets sent and thus the line never gets reset). To avoid this,
|
|
+ * mark the IRQ as TRIGGER_RISING for now, only creating a single
|
|
+ * interrupt, and let the SAM resume callback during the controller
|
|
+ * resume process clear it.
|
|
+ */
|
|
+ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING;
|
|
+
|
|
+ gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
|
|
+ if (IS_ERR(gpiod))
|
|
+ return PTR_ERR(gpiod);
|
|
+
|
|
+ irq = gpiod_to_irq(gpiod);
|
|
+ gpiod_put(gpiod);
|
|
+
|
|
+ if (irq < 0)
|
|
+ return irq;
|
|
+
|
|
+ status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
|
|
+ "ssam_wakeup", ctrl);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ctrl->irq.num = irq;
|
|
+ disable_irq(ctrl->irq.num);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
|
|
+ * @ctrl: The controller for which the IRQ should be freed.
|
|
+ *
|
|
+ * Free the wakeup-GPIO IRQ previously set-up via ssam_irq_setup().
|
|
+ */
|
|
+void ssam_irq_free(struct ssam_controller *ctrl)
|
|
+{
|
|
+ free_irq(ctrl->irq.num, ctrl);
|
|
+ ctrl->irq.num = -1;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
|
|
+ * @ctrl: The controller for which the IRQ should be armed.
|
|
+ *
|
|
+ * Sets up the IRQ so that it can be used to wake the device. Specifically,
|
|
+ * this function enables the irq and then, if the device is allowed to wake up
|
|
+ * the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
|
|
+ * corresponding function to disable the IRQ.
|
|
+ *
|
|
+ * This function is intended to arm the IRQ before entering S2idle suspend.
|
|
+ *
|
|
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
|
|
+ * be balanced.
|
|
+ */
|
|
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct device *dev = ssam_controller_device(ctrl);
|
|
+ int status;
|
|
+
|
|
+ enable_irq(ctrl->irq.num);
|
|
+ if (device_may_wakeup(dev)) {
|
|
+ status = enable_irq_wake(ctrl->irq.num);
|
|
+ if (status) {
|
|
+ ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
|
|
+ disable_irq(ctrl->irq.num);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ ctrl->irq.wakeup_enabled = true;
|
|
+ } else {
|
|
+ ctrl->irq.wakeup_enabled = false;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
|
|
+ * @ctrl: The controller for which the IRQ should be disarmed.
|
|
+ *
|
|
+ * Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
|
|
+ *
|
|
+ * This function is intended to disarm the IRQ after exiting S2idle suspend.
|
|
+ *
|
|
+ * Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
|
|
+ * be balanced.
|
|
+ */
|
|
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ if (ctrl->irq.wakeup_enabled) {
|
|
+ status = disable_irq_wake(ctrl->irq.num);
|
|
+ if (status)
|
|
+ ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
|
|
+
|
|
+ ctrl->irq.wakeup_enabled = false;
|
|
+ }
|
|
+ disable_irq(ctrl->irq.num);
|
|
+}
|
|
diff --git a/drivers/platform/x86/surface_aggregator/controller.h b/drivers/platform/x86/surface_aggregator/controller.h
|
|
new file mode 100644
|
|
index 000000000000..a0963c3562ff
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/controller.h
|
|
@@ -0,0 +1,285 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Main SSAM/SSH controller structure and functionality.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_CONTROLLER_H
|
|
+#define _SURFACE_AGGREGATOR_CONTROLLER_H
|
|
+
|
|
+#include <linux/kref.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/rbtree.h>
|
|
+#include <linux/rwsem.h>
|
|
+#include <linux/serdev.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/srcu.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+#include "ssh_request_layer.h"
|
|
+
|
|
+
|
|
+/* -- Safe counters. -------------------------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * struct ssh_seq_counter - Safe counter for SSH sequence IDs.
|
|
+ * @value: The current counter value.
|
|
+ */
|
|
+struct ssh_seq_counter {
|
|
+ u8 value;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_rqid_counter - Safe counter for SSH request IDs.
|
|
+ * @value: The current counter value.
|
|
+ */
|
|
+struct ssh_rqid_counter {
|
|
+ u16 value;
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Event/notification system. -------------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * struct ssam_nf_head - Notifier head for SSAM events.
|
|
+ * @srcu: The SRCU struct for synchronization.
|
|
+ * @head: List-head for notifier blocks registered under this head.
|
|
+ */
|
|
+struct ssam_nf_head {
|
|
+ struct srcu_struct srcu;
|
|
+ struct list_head head;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_nf - Notifier callback- and activation-registry for SSAM events.
|
|
+ * @lock: Lock guarding (de-)registration of notifier blocks. Note: This
|
|
+ * lock does not need to be held for notifier calls, only
|
|
+ * registration and deregistration.
|
|
+ * @refcount: The root of the RB-tree used for reference-counting enabled
|
|
+ * events/notifications.
|
|
+ * @head: The list of notifier heads for event/notification callbacks.
|
|
+ */
|
|
+struct ssam_nf {
|
|
+ struct mutex lock;
|
|
+ struct rb_root refcount;
|
|
+ struct ssam_nf_head head[SSH_NUM_EVENTS];
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Event/async request completion system. -------------------------------- */
|
|
+
|
|
+struct ssam_cplt;
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_item - Struct for event queuing and completion.
|
|
+ * @node: The node in the queue.
|
|
+ * @rqid: The request ID of the event.
|
|
+ * @ops: Instance specific functions.
|
|
+ * @ops.free: Callback for freeing this event item.
|
|
+ * @event: Actual event data.
|
|
+ */
|
|
+struct ssam_event_item {
|
|
+ struct list_head node;
|
|
+ u16 rqid;
|
|
+
|
|
+ struct {
|
|
+ void (*free)(struct ssam_event_item *event);
|
|
+ } ops;
|
|
+
|
|
+ struct ssam_event event; /* must be last */
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_queue - Queue for completing received events.
|
|
+ * @cplt: Reference to the completion system on which this queue is active.
|
|
+ * @lock: The lock for any operation on the queue.
|
|
+ * @head: The list-head of the queue.
|
|
+ * @work: The &struct work_struct performing completion work for this queue.
|
|
+ */
|
|
+struct ssam_event_queue {
|
|
+ struct ssam_cplt *cplt;
|
|
+
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_target - Set of queues for a single SSH target ID.
|
|
+ * @queue: The array of queues, one queue per event ID.
|
|
+ */
|
|
+struct ssam_event_target {
|
|
+ struct ssam_event_queue queue[SSH_NUM_EVENTS];
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_cplt - SSAM event/async request completion system.
|
|
+ * @dev: The device with which this system is associated. Only used
|
|
+ * for logging.
|
|
+ * @wq: The &struct workqueue_struct on which all completion work
|
|
+ * items are queued.
|
|
+ * @event: Event completion management.
|
|
+ * @event.target: Array of &struct ssam_event_target, one for each target.
|
|
+ * @event.notif: Notifier callbacks and event activation reference counting.
|
|
+ */
|
|
+struct ssam_cplt {
|
|
+ struct device *dev;
|
|
+ struct workqueue_struct *wq;
|
|
+
|
|
+ struct {
|
|
+ struct ssam_event_target target[SSH_NUM_TARGETS];
|
|
+ struct ssam_nf notif;
|
|
+ } event;
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Main SSAM device structures. ------------------------------------------ */
|
|
+
|
|
+/**
|
|
+ * enum ssam_controller_state - State values for &struct ssam_controller.
|
|
+ * @SSAM_CONTROLLER_UNINITIALIZED:
|
|
+ * The controller has not been initialized yet or has been deinitialized.
|
|
+ * @SSAM_CONTROLLER_INITIALIZED:
|
|
+ * The controller is initialized, but has not been started yet.
|
|
+ * @SSAM_CONTROLLER_STARTED:
|
|
+ * The controller has been started and is ready to use.
|
|
+ * @SSAM_CONTROLLER_STOPPED:
|
|
+ * The controller has been stopped.
|
|
+ * @SSAM_CONTROLLER_SUSPENDED:
|
|
+ * The controller has been suspended.
|
|
+ */
|
|
+enum ssam_controller_state {
|
|
+ SSAM_CONTROLLER_UNINITIALIZED,
|
|
+ SSAM_CONTROLLER_INITIALIZED,
|
|
+ SSAM_CONTROLLER_STARTED,
|
|
+ SSAM_CONTROLLER_STOPPED,
|
|
+ SSAM_CONTROLLER_SUSPENDED,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_controller_caps - Controller device capabilities.
|
|
+ * @ssh_power_profile: SSH power profile.
|
|
+ * @ssh_buffer_size: SSH driver UART buffer size.
|
|
+ * @screen_on_sleep_idle_timeout: SAM UART screen-on sleep idle timeout.
|
|
+ * @screen_off_sleep_idle_timeout: SAM UART screen-off sleep idle timeout.
|
|
+ * @d3_closes_handle: SAM closes UART handle in D3.
|
|
+ *
|
|
+ * Controller and SSH device capabilities found in ACPI.
|
|
+ */
|
|
+struct ssam_controller_caps {
|
|
+ u32 ssh_power_profile;
|
|
+ u32 ssh_buffer_size;
|
|
+ u32 screen_on_sleep_idle_timeout;
|
|
+ u32 screen_off_sleep_idle_timeout;
|
|
+ u32 d3_closes_handle:1;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_controller - SSAM controller device.
|
|
+ * @kref: Reference count of the controller.
|
|
+ * @lock: Main lock for the controller, used to guard state changes.
|
|
+ * @state: Controller state.
|
|
+ * @rtl: Request transport layer for SSH I/O.
|
|
+ * @cplt: Completion system for SSH/SSAM events and asynchronous requests.
|
|
+ * @counter: Safe SSH message ID counters.
|
|
+ * @counter.seq: Sequence ID counter.
|
|
+ * @counter.rqid: Request ID counter.
|
|
+ * @irq: Wakeup IRQ resources.
|
|
+ * @irq.num: The wakeup IRQ number.
|
|
+ * @irq.wakeup_enabled: Whether wakeup by IRQ is enabled during suspend.
|
|
+ * @caps: The controller device capabilities.
|
|
+ */
|
|
+struct ssam_controller {
|
|
+ struct kref kref;
|
|
+
|
|
+ struct rw_semaphore lock;
|
|
+ enum ssam_controller_state state;
|
|
+
|
|
+ struct ssh_rtl rtl;
|
|
+ struct ssam_cplt cplt;
|
|
+
|
|
+ struct {
|
|
+ struct ssh_seq_counter seq;
|
|
+ struct ssh_rqid_counter rqid;
|
|
+ } counter;
|
|
+
|
|
+ struct {
|
|
+ int num;
|
|
+ bool wakeup_enabled;
|
|
+ } irq;
|
|
+
|
|
+ struct ssam_controller_caps caps;
|
|
+};
|
|
+
|
|
+#define to_ssam_controller(ptr, member) \
|
|
+ container_of(ptr, struct ssam_controller, member)
|
|
+
|
|
+#define ssam_dbg(ctrl, fmt, ...) rtl_dbg(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
|
|
+#define ssam_info(ctrl, fmt, ...) rtl_info(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
|
|
+#define ssam_warn(ctrl, fmt, ...) rtl_warn(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
|
|
+#define ssam_err(ctrl, fmt, ...) rtl_err(&(ctrl)->rtl, fmt, ##__VA_ARGS__)
|
|
+
|
|
+/**
|
|
+ * ssam_controller_receive_buf() - Provide input-data to the controller.
|
|
+ * @ctrl: The controller.
|
|
+ * @buf: The input buffer.
|
|
+ * @n: The number of bytes in the input buffer.
|
|
+ *
|
|
+ * Provide input data to be evaluated by the controller, which has been
|
|
+ * received via the lower-level transport.
|
|
+ *
|
|
+ * Return: Returns the number of bytes consumed, or, if the packet transport
|
|
+ * layer of the controller has been shut down, %-ESHUTDOWN.
|
|
+ */
|
|
+static inline
|
|
+int ssam_controller_receive_buf(struct ssam_controller *ctrl,
|
|
+ const unsigned char *buf, size_t n)
|
|
+{
|
|
+ return ssh_ptl_rx_rcvbuf(&ctrl->rtl.ptl, buf, n);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_controller_write_wakeup() - Notify the controller that the underlying
|
|
+ * device has space available for data to be written.
|
|
+ * @ctrl: The controller.
|
|
+ */
|
|
+static inline void ssam_controller_write_wakeup(struct ssam_controller *ctrl)
|
|
+{
|
|
+ ssh_ptl_tx_wakeup_transfer(&ctrl->rtl.ptl);
|
|
+}
|
|
+
|
|
+int ssam_controller_init(struct ssam_controller *ctrl, struct serdev_device *s);
|
|
+int ssam_controller_start(struct ssam_controller *ctrl);
|
|
+void ssam_controller_shutdown(struct ssam_controller *ctrl);
|
|
+void ssam_controller_destroy(struct ssam_controller *ctrl);
|
|
+
|
|
+int ssam_notifier_disable_registered(struct ssam_controller *ctrl);
|
|
+void ssam_notifier_restore_registered(struct ssam_controller *ctrl);
|
|
+
|
|
+int ssam_irq_setup(struct ssam_controller *ctrl);
|
|
+void ssam_irq_free(struct ssam_controller *ctrl);
|
|
+int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl);
|
|
+void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl);
|
|
+
|
|
+void ssam_controller_lock(struct ssam_controller *c);
|
|
+void ssam_controller_unlock(struct ssam_controller *c);
|
|
+
|
|
+int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version);
|
|
+int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl);
|
|
+int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl);
|
|
+int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl);
|
|
+int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl);
|
|
+
|
|
+int ssam_controller_suspend(struct ssam_controller *ctrl);
|
|
+int ssam_controller_resume(struct ssam_controller *ctrl);
|
|
+
|
|
+int ssam_event_item_cache_init(void);
|
|
+void ssam_event_item_cache_destroy(void);
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_CONTROLLER_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/core.c b/drivers/platform/x86/surface_aggregator/core.c
|
|
new file mode 100644
|
|
index 000000000000..698be0e24e9e
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/core.c
|
|
@@ -0,0 +1,850 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface Serial Hub (SSH) driver for communication with the Surface/System
|
|
+ * Aggregator Module (SSAM/SAM).
|
|
+ *
|
|
+ * Provides access to a SAM-over-SSH connected EC via a controller device.
|
|
+ * Handles communication via requests as well as enabling, disabling, and
|
|
+ * relaying of events.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/completion.h>
|
|
+#include <linux/gpio/consumer.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/serdev.h>
|
|
+#include <linux/sysfs.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#include "bus.h"
|
|
+#include "controller.h"
|
|
+
|
|
+#define CREATE_TRACE_POINTS
|
|
+#include "trace.h"
|
|
+
|
|
+
|
|
+/* -- Static controller reference. ------------------------------------------ */
|
|
+
|
|
+/*
|
|
+ * Main controller reference. The corresponding lock must be held while
|
|
+ * accessing (reading/writing) the reference.
|
|
+ */
|
|
+static struct ssam_controller *__ssam_controller;
|
|
+static DEFINE_SPINLOCK(__ssam_controller_lock);
|
|
+
|
|
+/**
|
|
+ * ssam_get_controller() - Get reference to SSAM controller.
|
|
+ *
|
|
+ * Returns a reference to the SSAM controller of the system or %NULL if there
|
|
+ * is none, it hasn't been set up yet, or it has already been unregistered.
|
|
+ * This function automatically increments the reference count of the
|
|
+ * controller, thus the calling party must ensure that ssam_controller_put()
|
|
+ * is called when it doesn't need the controller any more.
|
|
+ */
|
|
+struct ssam_controller *ssam_get_controller(void)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ spin_lock(&__ssam_controller_lock);
|
|
+
|
|
+ ctrl = __ssam_controller;
|
|
+ if (!ctrl)
|
|
+ goto out;
|
|
+
|
|
+ if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
|
|
+ ctrl = NULL;
|
|
+
|
|
+out:
|
|
+ spin_unlock(&__ssam_controller_lock);
|
|
+ return ctrl;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_get_controller);
|
|
+
|
|
+/**
|
|
+ * ssam_try_set_controller() - Try to set the main controller reference.
|
|
+ * @ctrl: The controller to which the reference should point.
|
|
+ *
|
|
+ * Set the main controller reference to the given pointer if the reference
|
|
+ * hasn't been set already.
|
|
+ *
|
|
+ * Return: Returns zero on success or %-EEXIST if the reference has already
|
|
+ * been set.
|
|
+ */
|
|
+static int ssam_try_set_controller(struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status = 0;
|
|
+
|
|
+ spin_lock(&__ssam_controller_lock);
|
|
+ if (!__ssam_controller)
|
|
+ __ssam_controller = ctrl;
|
|
+ else
|
|
+ status = -EEXIST;
|
|
+ spin_unlock(&__ssam_controller_lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_clear_controller() - Remove/clear the main controller reference.
|
|
+ *
|
|
+ * Clears the main controller reference, i.e. sets it to %NULL. This function
|
|
+ * should be called before the controller is shut down.
|
|
+ */
|
|
+static void ssam_clear_controller(void)
|
|
+{
|
|
+ spin_lock(&__ssam_controller_lock);
|
|
+ __ssam_controller = NULL;
|
|
+ spin_unlock(&__ssam_controller_lock);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_client_link() - Link an arbitrary client device to the controller.
|
|
+ * @c: The controller to link to.
|
|
+ * @client: The client device.
|
|
+ *
|
|
+ * Link an arbitrary client device to the controller by creating a device link
|
|
+ * between it as consumer and the controller device as provider. This function
|
|
+ * can be used for non-SSAM devices (or SSAM devices not registered as child
|
|
+ * under the controller) to guarantee that the controller is valid for as long
|
|
+ * as the driver of the client device is bound, and that proper suspend and
|
|
+ * resume ordering is guaranteed.
|
|
+ *
|
|
+ * The device link does not have to be destructed manually. It is removed
|
|
+ * automatically once the driver of the client device unbinds.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENODEV if the controller is not ready or
|
|
+ * going to be removed soon, or %-ENOMEM if the device link could not be
|
|
+ * created for other reasons.
|
|
+ */
|
|
+int ssam_client_link(struct ssam_controller *c, struct device *client)
|
|
+{
|
|
+ const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
|
|
+ struct device_link *link;
|
|
+ struct device *ctrldev;
|
|
+
|
|
+ ssam_controller_statelock(c);
|
|
+
|
|
+ if (c->state != SSAM_CONTROLLER_STARTED) {
|
|
+ ssam_controller_stateunlock(c);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ctrldev = ssam_controller_device(c);
|
|
+ if (!ctrldev) {
|
|
+ ssam_controller_stateunlock(c);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ link = device_link_add(client, ctrldev, flags);
|
|
+ if (!link) {
|
|
+ ssam_controller_stateunlock(c);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Return -ENODEV if supplier driver is on its way to be removed. In
|
|
+ * this case, the controller won't be around for much longer and the
|
|
+ * device link is not going to save us any more, as unbinding is
|
|
+ * already in progress.
|
|
+ */
|
|
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
|
|
+ ssam_controller_stateunlock(c);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ ssam_controller_stateunlock(c);
|
|
+ return 0;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_client_link);
|
|
+
|
|
+/**
|
|
+ * ssam_client_bind() - Bind an arbitrary client device to the controller.
|
|
+ * @client: The client device.
|
|
+ *
|
|
+ * Link an arbitrary client device to the controller by creating a device link
|
|
+ * between it as consumer and the main controller device as provider. This
|
|
+ * function can be used for non-SSAM devices to guarantee that the controller
|
|
+ * returned by this function is valid for as long as the driver of the client
|
|
+ * device is bound, and that proper suspend and resume ordering is guaranteed.
|
|
+ *
|
|
+ * This function does essentially the same as ssam_client_link(), except that
|
|
+ * it first fetches the main controller reference, then creates the link, and
|
|
+ * finally returns this reference. Note that this function does not increment
|
|
+ * the reference counter of the controller, as, due to the link, the
|
|
+ * controller lifetime is assured as long as the driver of the client device
|
|
+ * is bound.
|
|
+ *
|
|
+ * It is not valid to use the controller reference obtained by this method
|
|
+ * outside of the driver bound to the client device at the time of calling
|
|
+ * this function, without first incrementing the reference count of the
|
|
+ * controller via ssam_controller_get(). Even after doing this, care must be
|
|
+ * taken that requests are only submitted and notifiers are only
|
|
+ * (un-)registered when the controller is active and not suspended. In other
|
|
+ * words: The device link only lives as long as the client driver is bound and
|
|
+ * any guarantees enforced by this link (e.g. active controller state) can
|
|
+ * only be relied upon as long as this link exists and may need to be enforced
|
|
+ * in other ways afterwards.
|
|
+ *
|
|
+ * The created device link does not have to be destructed manually. It is
|
|
+ * removed automatically once the driver of the client device unbinds.
|
|
+ *
|
|
+ * Return: Returns the controller on success, an error pointer with %-ENODEV
|
|
+ * if the controller is not present, not ready or going to be removed soon, or
|
|
+ * %-ENOMEM if the device link could not be created for other reasons.
|
|
+ */
|
|
+struct ssam_controller *ssam_client_bind(struct device *client)
|
|
+{
|
|
+ struct ssam_controller *c;
|
|
+ int status;
|
|
+
|
|
+ c = ssam_get_controller();
|
|
+ if (!c)
|
|
+ return ERR_PTR(-ENODEV);
|
|
+
|
|
+ status = ssam_client_link(c, client);
|
|
+
|
|
+ /*
|
|
+ * Note that we can drop our controller reference in both success and
|
|
+ * failure cases: On success, we have bound the controller lifetime
|
|
+ * inherently to the client driver lifetime, i.e. it the controller is
|
|
+ * now guaranteed to outlive the client driver. On failure, we're not
|
|
+ * going to use the controller any more.
|
|
+ */
|
|
+ ssam_controller_put(c);
|
|
+
|
|
+ return status >= 0 ? c : ERR_PTR(status);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssam_client_bind);
|
|
+
|
|
+
|
|
+/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
|
|
+
|
|
+static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
|
|
+ size_t n)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ ctrl = serdev_device_get_drvdata(dev);
|
|
+ return ssam_controller_receive_buf(ctrl, buf, n);
|
|
+}
|
|
+
|
|
+static void ssam_write_wakeup(struct serdev_device *dev)
|
|
+{
|
|
+ ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
|
|
+}
|
|
+
|
|
+static const struct serdev_device_ops ssam_serdev_ops = {
|
|
+ .receive_buf = ssam_receive_buf,
|
|
+ .write_wakeup = ssam_write_wakeup,
|
|
+};
|
|
+
|
|
+
|
|
+/* -- SysFS and misc. ------------------------------------------------------- */
|
|
+
|
|
+static int ssam_log_firmware_version(struct ssam_controller *ctrl)
|
|
+{
|
|
+ u32 version, a, b, c;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_get_firmware_version(ctrl, &version);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ a = (version >> 24) & 0xff;
|
|
+ b = ((version >> 8) & 0xffff);
|
|
+ c = version & 0xff;
|
|
+
|
|
+ ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t firmware_version_show(struct device *dev,
|
|
+ struct device_attribute *attr, char *buf)
|
|
+{
|
|
+ struct ssam_controller *ctrl = dev_get_drvdata(dev);
|
|
+ u32 version, a, b, c;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_get_firmware_version(ctrl, &version);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ a = (version >> 24) & 0xff;
|
|
+ b = ((version >> 8) & 0xffff);
|
|
+ c = version & 0xff;
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "%u.%u.%u\n", a, b, c);
|
|
+}
|
|
+static DEVICE_ATTR_RO(firmware_version);
|
|
+
|
|
+static struct attribute *ssam_sam_attrs[] = {
|
|
+ &dev_attr_firmware_version.attr,
|
|
+ NULL
|
|
+};
|
|
+
|
|
+static const struct attribute_group ssam_sam_group = {
|
|
+ .name = "sam",
|
|
+ .attrs = ssam_sam_attrs,
|
|
+};
|
|
+
|
|
+
|
|
+/* -- ACPI based device setup. ---------------------------------------------- */
|
|
+
|
|
+static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
|
|
+ void *ctx)
|
|
+{
|
|
+ struct serdev_device *serdev = ctx;
|
|
+ struct acpi_resource_common_serialbus *serial;
|
|
+ struct acpi_resource_uart_serialbus *uart;
|
|
+ bool flow_control;
|
|
+ int status = 0;
|
|
+
|
|
+ if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
|
|
+ return AE_OK;
|
|
+
|
|
+ serial = &rsc->data.common_serial_bus;
|
|
+ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
|
|
+ return AE_OK;
|
|
+
|
|
+ uart = &rsc->data.uart_serial_bus;
|
|
+
|
|
+ /* Set up serdev device. */
|
|
+ serdev_device_set_baudrate(serdev, uart->default_baud_rate);
|
|
+
|
|
+ /* serdev currently only supports RTSCTS flow control. */
|
|
+ if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) {
|
|
+ dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n",
|
|
+ uart->flow_control);
|
|
+ }
|
|
+
|
|
+ /* Set RTSCTS flow control. */
|
|
+ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
|
|
+ serdev_device_set_flow_control(serdev, flow_control);
|
|
+
|
|
+ /* serdev currently only supports EVEN/ODD parity. */
|
|
+ switch (uart->parity) {
|
|
+ case ACPI_UART_PARITY_NONE:
|
|
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
|
|
+ break;
|
|
+ case ACPI_UART_PARITY_EVEN:
|
|
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
|
|
+ break;
|
|
+ case ACPI_UART_PARITY_ODD:
|
|
+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
|
|
+ break;
|
|
+ default:
|
|
+ dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n",
|
|
+ uart->parity);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (status) {
|
|
+ dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n",
|
|
+ uart->parity, status);
|
|
+ return AE_ERROR;
|
|
+ }
|
|
+
|
|
+ /* We've found the resource and are done. */
|
|
+ return AE_CTRL_TERMINATE;
|
|
+}
|
|
+
|
|
+static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
|
|
+ struct serdev_device *serdev)
|
|
+{
|
|
+ return acpi_walk_resources(handle, METHOD_NAME__CRS,
|
|
+ ssam_serdev_setup_via_acpi_crs, serdev);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Power management. ----------------------------------------------------- */
|
|
+
|
|
+static void ssam_serial_hub_shutdown(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Try to disable notifiers, signal display-off and D0-exit, ignore any
|
|
+ * errors.
|
|
+ *
|
|
+ * Note: It has not been established yet if this is actually
|
|
+ * necessary/useful for shutdown.
|
|
+ */
|
|
+
|
|
+ status = ssam_notifier_disable_registered(c);
|
|
+ if (status) {
|
|
+ ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
|
|
+ status);
|
|
+ }
|
|
+
|
|
+ status = ssam_ctrl_notif_display_off(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_exit(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+
|
|
+static int ssam_serial_hub_pm_prepare(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Try to signal display-off, This will quiesce events.
|
|
+ *
|
|
+ * Note: Signaling display-off/display-on should normally be done from
|
|
+ * some sort of display state notifier. As that is not available,
|
|
+ * signal it here.
|
|
+ */
|
|
+
|
|
+ status = ssam_ctrl_notif_display_off(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: display-off notification failed: %d\n", status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void ssam_serial_hub_pm_complete(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Try to signal display-on. This will restore events.
|
|
+ *
|
|
+ * Note: Signaling display-off/display-on should normally be done from
|
|
+ * some sort of display state notifier. As that is not available,
|
|
+ * signal it here.
|
|
+ */
|
|
+
|
|
+ status = ssam_ctrl_notif_display_on(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: display-on notification failed: %d\n", status);
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_suspend(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
|
|
+ * error.
|
|
+ */
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_exit(c);
|
|
+ if (status) {
|
|
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
|
|
+ goto err_notif;
|
|
+ }
|
|
+
|
|
+ status = ssam_irq_arm_for_wakeup(c);
|
|
+ if (status)
|
|
+ goto err_irq;
|
|
+
|
|
+ WARN_ON(ssam_controller_suspend(c));
|
|
+ return 0;
|
|
+
|
|
+err_irq:
|
|
+ ssam_ctrl_notif_d0_entry(c);
|
|
+err_notif:
|
|
+ ssam_ctrl_notif_display_on(c);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_resume(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ WARN_ON(ssam_controller_resume(c));
|
|
+
|
|
+ /*
|
|
+ * Try to disable IRQ wakeup (if specified) and signal D0-entry. In
|
|
+ * case of errors, log them and try to restore normal operation state
|
|
+ * as far as possible.
|
|
+ *
|
|
+ * Note: Signaling display-off/display-on should normally be done from
|
|
+ * some sort of display state notifier. As that is not available,
|
|
+ * signal it here.
|
|
+ */
|
|
+
|
|
+ ssam_irq_disarm_wakeup(c);
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_entry(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_freeze(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * During hibernation image creation, we only have to ensure that the
|
|
+ * EC doesn't send us any events. This is done via the display-off
|
|
+ * and D0-exit notifications. Note that this sets up the wakeup IRQ
|
|
+ * on the EC side, however, we have disabled it by default on our side
|
|
+ * and won't enable it here.
|
|
+ *
|
|
+ * See ssam_serial_hub_poweroff() for more details on the hibernation
|
|
+ * process.
|
|
+ */
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_exit(c);
|
|
+ if (status) {
|
|
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
|
|
+ ssam_ctrl_notif_display_on(c);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ WARN_ON(ssam_controller_suspend(c));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_thaw(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ WARN_ON(ssam_controller_resume(c));
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_entry(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_poweroff(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * When entering hibernation and powering off the system, the EC, at
|
|
+ * least on some models, may disable events. Without us taking care of
|
|
+ * that, this leads to events not being enabled/restored when the
|
|
+ * system resumes from hibernation, resulting SAM-HID subsystem devices
|
|
+ * (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
|
|
+ * gone, etc.
|
|
+ *
|
|
+ * To avoid these issues, we disable all registered events here (this is
|
|
+ * likely not actually required) and restore them during the drivers PM
|
|
+ * restore callback.
|
|
+ *
|
|
+ * Wakeup from the EC interrupt is not supported during hibernation,
|
|
+ * so don't arm the IRQ here.
|
|
+ */
|
|
+
|
|
+ status = ssam_notifier_disable_registered(c);
|
|
+ if (status) {
|
|
+ ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
|
|
+ status);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_exit(c);
|
|
+ if (status) {
|
|
+ ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
|
|
+ ssam_notifier_restore_registered(c);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ WARN_ON(ssam_controller_suspend(c));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_serial_hub_pm_restore(struct device *dev)
|
|
+{
|
|
+ struct ssam_controller *c = dev_get_drvdata(dev);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Ignore but log errors, try to restore state as much as possible in
|
|
+ * case of failures. See ssam_serial_hub_poweroff() for more details on
|
|
+ * the hibernation process.
|
|
+ */
|
|
+
|
|
+ WARN_ON(ssam_controller_resume(c));
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_entry(c);
|
|
+ if (status)
|
|
+ ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
|
|
+
|
|
+ ssam_notifier_restore_registered(c);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
|
|
+ .prepare = ssam_serial_hub_pm_prepare,
|
|
+ .complete = ssam_serial_hub_pm_complete,
|
|
+ .suspend = ssam_serial_hub_pm_suspend,
|
|
+ .resume = ssam_serial_hub_pm_resume,
|
|
+ .freeze = ssam_serial_hub_pm_freeze,
|
|
+ .thaw = ssam_serial_hub_pm_thaw,
|
|
+ .poweroff = ssam_serial_hub_pm_poweroff,
|
|
+ .restore = ssam_serial_hub_pm_restore,
|
|
+};
|
|
+
|
|
+#else /* CONFIG_PM_SLEEP */
|
|
+
|
|
+static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
|
|
+
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+
|
|
+/* -- Device/driver setup. -------------------------------------------------- */
|
|
+
|
|
+static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
|
|
+static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
|
|
+
|
|
+static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
|
|
+ { "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
|
|
+ { "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
|
|
+ { },
|
|
+};
|
|
+
|
|
+static int ssam_serial_hub_probe(struct serdev_device *serdev)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+ acpi_handle *ssh = ACPI_HANDLE(&serdev->dev);
|
|
+ acpi_status astatus;
|
|
+ int status;
|
|
+
|
|
+ if (gpiod_count(&serdev->dev, NULL) < 0)
|
|
+ return -ENODEV;
|
|
+
|
|
+ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /* Allocate controller. */
|
|
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
|
|
+ if (!ctrl)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ /* Initialize controller. */
|
|
+ status = ssam_controller_init(ctrl, serdev);
|
|
+ if (status)
|
|
+ goto err_ctrl_init;
|
|
+
|
|
+ ssam_controller_lock(ctrl);
|
|
+
|
|
+ /* Set up serdev device. */
|
|
+ serdev_device_set_drvdata(serdev, ctrl);
|
|
+ serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
|
|
+ status = serdev_device_open(serdev);
|
|
+ if (status)
|
|
+ goto err_devopen;
|
|
+
|
|
+ astatus = ssam_serdev_setup_via_acpi(ssh, serdev);
|
|
+ if (ACPI_FAILURE(astatus)) {
|
|
+ status = -ENXIO;
|
|
+ goto err_devinit;
|
|
+ }
|
|
+
|
|
+ /* Start controller. */
|
|
+ status = ssam_controller_start(ctrl);
|
|
+ if (status)
|
|
+ goto err_devinit;
|
|
+
|
|
+ ssam_controller_unlock(ctrl);
|
|
+
|
|
+ /*
|
|
+ * Initial SAM requests: Log version and notify default/init power
|
|
+ * states.
|
|
+ */
|
|
+ status = ssam_log_firmware_version(ctrl);
|
|
+ if (status)
|
|
+ goto err_initrq;
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_entry(ctrl);
|
|
+ if (status)
|
|
+ goto err_initrq;
|
|
+
|
|
+ status = ssam_ctrl_notif_display_on(ctrl);
|
|
+ if (status)
|
|
+ goto err_initrq;
|
|
+
|
|
+ status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
|
|
+ if (status)
|
|
+ goto err_initrq;
|
|
+
|
|
+ /* Set up IRQ. */
|
|
+ status = ssam_irq_setup(ctrl);
|
|
+ if (status)
|
|
+ goto err_irq;
|
|
+
|
|
+ /* Finally, set main controller reference. */
|
|
+ status = ssam_try_set_controller(ctrl);
|
|
+ if (WARN_ON(status)) /* Currently, we're the only provider. */
|
|
+ goto err_mainref;
|
|
+
|
|
+ /*
|
|
+ * TODO: The EC can wake up the system via the associated GPIO interrupt
|
|
+ * in multiple situations. One of which is the remaining battery
|
|
+ * capacity falling below a certain threshold. Normally, we should
|
|
+ * use the device_init_wakeup function, however, the EC also seems
|
|
+ * to have other reasons for waking up the system and it seems
|
|
+ * that Windows has additional checks whether the system should be
|
|
+ * resumed. In short, this causes some spurious unwanted wake-ups.
|
|
+ * For now let's thus default power/wakeup to false.
|
|
+ */
|
|
+ device_set_wakeup_capable(&serdev->dev, true);
|
|
+ acpi_walk_dep_device_list(ssh);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_mainref:
|
|
+ ssam_irq_free(ctrl);
|
|
+err_irq:
|
|
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
|
|
+err_initrq:
|
|
+ ssam_controller_lock(ctrl);
|
|
+ ssam_controller_shutdown(ctrl);
|
|
+err_devinit:
|
|
+ serdev_device_close(serdev);
|
|
+err_devopen:
|
|
+ ssam_controller_destroy(ctrl);
|
|
+ ssam_controller_unlock(ctrl);
|
|
+err_ctrl_init:
|
|
+ kfree(ctrl);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void ssam_serial_hub_remove(struct serdev_device *serdev)
|
|
+{
|
|
+ struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
|
|
+ int status;
|
|
+
|
|
+ /* Clear static reference so that no one else can get a new one. */
|
|
+ ssam_clear_controller();
|
|
+
|
|
+ /* Disable and free IRQ. */
|
|
+ ssam_irq_free(ctrl);
|
|
+
|
|
+ sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
|
|
+ ssam_controller_lock(ctrl);
|
|
+
|
|
+ /* Remove all client devices. */
|
|
+ ssam_controller_remove_clients(ctrl);
|
|
+
|
|
+ /* Act as if suspending to silence events. */
|
|
+ status = ssam_ctrl_notif_display_off(ctrl);
|
|
+ if (status) {
|
|
+ dev_err(&serdev->dev, "display-off notification failed: %d\n",
|
|
+ status);
|
|
+ }
|
|
+
|
|
+ status = ssam_ctrl_notif_d0_exit(ctrl);
|
|
+ if (status) {
|
|
+ dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
|
|
+ status);
|
|
+ }
|
|
+
|
|
+ /* Shut down controller and remove serdev device reference from it. */
|
|
+ ssam_controller_shutdown(ctrl);
|
|
+
|
|
+ /* Shut down actual transport. */
|
|
+ serdev_device_wait_until_sent(serdev, 0);
|
|
+ serdev_device_close(serdev);
|
|
+
|
|
+ /* Drop our controller reference. */
|
|
+ ssam_controller_unlock(ctrl);
|
|
+ ssam_controller_put(ctrl);
|
|
+
|
|
+ device_set_wakeup_capable(&serdev->dev, false);
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id ssam_serial_hub_match[] = {
|
|
+ { "MSHW0084", 0 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
|
|
+
|
|
+static struct serdev_device_driver ssam_serial_hub = {
|
|
+ .probe = ssam_serial_hub_probe,
|
|
+ .remove = ssam_serial_hub_remove,
|
|
+ .driver = {
|
|
+ .name = "surface_serial_hub",
|
|
+ .acpi_match_table = ssam_serial_hub_match,
|
|
+ .pm = &ssam_serial_hub_pm_ops,
|
|
+ .shutdown = ssam_serial_hub_shutdown,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Module setup. --------------------------------------------------------- */
|
|
+
|
|
+static int __init ssam_core_init(void)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssam_bus_register();
|
|
+ if (status)
|
|
+ goto err_bus;
|
|
+
|
|
+ status = ssh_ctrl_packet_cache_init();
|
|
+ if (status)
|
|
+ goto err_cpkg;
|
|
+
|
|
+ status = ssam_event_item_cache_init();
|
|
+ if (status)
|
|
+ goto err_evitem;
|
|
+
|
|
+ status = serdev_device_driver_register(&ssam_serial_hub);
|
|
+ if (status)
|
|
+ goto err_register;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_register:
|
|
+ ssam_event_item_cache_destroy();
|
|
+err_evitem:
|
|
+ ssh_ctrl_packet_cache_destroy();
|
|
+err_cpkg:
|
|
+ ssam_bus_unregister();
|
|
+err_bus:
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void __exit ssam_core_exit(void)
|
|
+{
|
|
+ serdev_device_driver_unregister(&ssam_serial_hub);
|
|
+ ssam_event_item_cache_destroy();
|
|
+ ssh_ctrl_packet_cache_destroy();
|
|
+ ssam_bus_unregister();
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Ensure that the driver is loaded late due to some issues with the UART
|
|
+ * communication. Specifically, we want to ensure that DMA is ready and being
|
|
+ * used. Not using DMA can result in spurious communication failures,
|
|
+ * especially during boot, which among other things will result in wrong
|
|
+ * battery information (via ACPI _BIX) being displayed. Using a late init_call
|
|
+ * instead of the normal module_init gives the DMA subsystem time to
|
|
+ * initialize and via that results in a more stable communication, avoiding
|
|
+ * such failures.
|
|
+ */
|
|
+late_initcall(ssam_core_init);
|
|
+module_exit(ssam_core_exit);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_msgb.h b/drivers/platform/x86/surface_aggregator/ssh_msgb.h
|
|
new file mode 100644
|
|
index 000000000000..e562958ffdf0
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_msgb.h
|
|
@@ -0,0 +1,205 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * SSH message builder functions.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_SSH_MSGB_H
|
|
+#define _SURFACE_AGGREGATOR_SSH_MSGB_H
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+/**
|
|
+ * struct msgbuf - Buffer struct to construct SSH messages.
|
|
+ * @begin: Pointer to the beginning of the allocated buffer space.
|
|
+ * @end: Pointer to the end (one past last element) of the allocated buffer
|
|
+ * space.
|
|
+ * @ptr: Pointer to the first free element in the buffer.
|
|
+ */
|
|
+struct msgbuf {
|
|
+ u8 *begin;
|
|
+ u8 *end;
|
|
+ u8 *ptr;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * msgb_init() - Initialize the given message buffer struct.
|
|
+ * @msgb: The buffer struct to initialize
|
|
+ * @ptr: Pointer to the underlying memory by which the buffer will be backed.
|
|
+ * @cap: Size of the underlying memory.
|
|
+ *
|
|
+ * Initialize the given message buffer struct using the provided memory as
|
|
+ * backing.
|
|
+ */
|
|
+static inline void msgb_init(struct msgbuf *msgb, u8 *ptr, size_t cap)
|
|
+{
|
|
+ msgb->begin = ptr;
|
|
+ msgb->end = ptr + cap;
|
|
+ msgb->ptr = ptr;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_bytes_used() - Return the current number of bytes used in the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ */
|
|
+static inline size_t msgb_bytes_used(const struct msgbuf *msgb)
|
|
+{
|
|
+ return msgb->ptr - msgb->begin;
|
|
+}
|
|
+
|
|
+static inline void __msgb_push_u8(struct msgbuf *msgb, u8 value)
|
|
+{
|
|
+ *msgb->ptr = value;
|
|
+ msgb->ptr += sizeof(u8);
|
|
+}
|
|
+
|
|
+static inline void __msgb_push_u16(struct msgbuf *msgb, u16 value)
|
|
+{
|
|
+ put_unaligned_le16(value, msgb->ptr);
|
|
+ msgb->ptr += sizeof(u16);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_u16() - Push a u16 value to the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ * @value: The value to push to the buffer.
|
|
+ */
|
|
+static inline void msgb_push_u16(struct msgbuf *msgb, u16 value)
|
|
+{
|
|
+ if (WARN_ON(msgb->ptr + sizeof(u16) > msgb->end))
|
|
+ return;
|
|
+
|
|
+ __msgb_push_u16(msgb, value);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_syn() - Push SSH SYN bytes to the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ */
|
|
+static inline void msgb_push_syn(struct msgbuf *msgb)
|
|
+{
|
|
+ msgb_push_u16(msgb, SSH_MSG_SYN);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_buf() - Push raw data to the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ * @buf: The data to push to the buffer.
|
|
+ * @len: The length of the data to push to the buffer.
|
|
+ */
|
|
+static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len)
|
|
+{
|
|
+ msgb->ptr = memcpy(msgb->ptr, buf, len) + len;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_crc() - Compute CRC and push it to the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ * @buf: The data for which the CRC should be computed.
|
|
+ * @len: The length of the data for which the CRC should be computed.
|
|
+ */
|
|
+static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len)
|
|
+{
|
|
+ msgb_push_u16(msgb, ssh_crc(buf, len));
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_frame() - Push a SSH message frame header to the buffer.
|
|
+ * @msgb: The message buffer
|
|
+ * @ty: The type of the frame.
|
|
+ * @len: The length of the payload of the frame.
|
|
+ * @seq: The sequence ID of the frame/packet.
|
|
+ */
|
|
+static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq)
|
|
+{
|
|
+ u8 *const begin = msgb->ptr;
|
|
+
|
|
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_frame) > msgb->end))
|
|
+ return;
|
|
+
|
|
+ __msgb_push_u8(msgb, ty); /* Frame type. */
|
|
+ __msgb_push_u16(msgb, len); /* Frame payload length. */
|
|
+ __msgb_push_u8(msgb, seq); /* Frame sequence ID. */
|
|
+
|
|
+ msgb_push_crc(msgb, begin, msgb->ptr - begin);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_ack() - Push a SSH ACK frame to the buffer.
|
|
+ * @msgb: The message buffer
|
|
+ * @seq: The sequence ID of the frame/packet to be ACKed.
|
|
+ */
|
|
+static inline void msgb_push_ack(struct msgbuf *msgb, u8 seq)
|
|
+{
|
|
+ /* SYN. */
|
|
+ msgb_push_syn(msgb);
|
|
+
|
|
+ /* ACK-type frame + CRC. */
|
|
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq);
|
|
+
|
|
+ /* Payload CRC (ACK-type frames do not have a payload). */
|
|
+ msgb_push_crc(msgb, msgb->ptr, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_nak() - Push a SSH NAK frame to the buffer.
|
|
+ * @msgb: The message buffer
|
|
+ */
|
|
+static inline void msgb_push_nak(struct msgbuf *msgb)
|
|
+{
|
|
+ /* SYN. */
|
|
+ msgb_push_syn(msgb);
|
|
+
|
|
+ /* NAK-type frame + CRC. */
|
|
+ msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00);
|
|
+
|
|
+ /* Payload CRC (ACK-type frames do not have a payload). */
|
|
+ msgb_push_crc(msgb, msgb->ptr, 0);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * msgb_push_cmd() - Push a SSH command frame with payload to the buffer.
|
|
+ * @msgb: The message buffer.
|
|
+ * @seq: The sequence ID (SEQ) of the frame/packet.
|
|
+ * @rqid: The request ID (RQID) of the request contained in the frame.
|
|
+ * @rqst: The request to wrap in the frame.
|
|
+ */
|
|
+static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
|
|
+ const struct ssam_request *rqst)
|
|
+{
|
|
+ const u8 type = SSH_FRAME_TYPE_DATA_SEQ;
|
|
+ u8 *cmd;
|
|
+
|
|
+ /* SYN. */
|
|
+ msgb_push_syn(msgb);
|
|
+
|
|
+ /* Command frame + CRC. */
|
|
+ msgb_push_frame(msgb, type, sizeof(struct ssh_command) + rqst->length, seq);
|
|
+
|
|
+ /* Frame payload: Command struct + payload. */
|
|
+ if (WARN_ON(msgb->ptr + sizeof(struct ssh_command) > msgb->end))
|
|
+ return;
|
|
+
|
|
+ cmd = msgb->ptr;
|
|
+
|
|
+ __msgb_push_u8(msgb, SSH_PLD_TYPE_CMD); /* Payload type. */
|
|
+ __msgb_push_u8(msgb, rqst->target_category); /* Target category. */
|
|
+ __msgb_push_u8(msgb, rqst->target_id); /* Target ID (out). */
|
|
+ __msgb_push_u8(msgb, 0x00); /* Target ID (in). */
|
|
+ __msgb_push_u8(msgb, rqst->instance_id); /* Instance ID. */
|
|
+ __msgb_push_u16(msgb, rqid); /* Request ID. */
|
|
+ __msgb_push_u8(msgb, rqst->command_id); /* Command ID. */
|
|
+
|
|
+ /* Command payload. */
|
|
+ msgb_push_buf(msgb, rqst->payload, rqst->length);
|
|
+
|
|
+ /* CRC for command struct + payload. */
|
|
+ msgb_push_crc(msgb, cmd, msgb->ptr - cmd);
|
|
+}
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_SSH_MSGB_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_packet_layer.c b/drivers/platform/x86/surface_aggregator/ssh_packet_layer.c
|
|
new file mode 100644
|
|
index 000000000000..8a06beb39aee
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_packet_layer.c
|
|
@@ -0,0 +1,2074 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * SSH packet transport layer.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/error-injection.h>
|
|
+#include <linux/jiffies.h>
|
|
+#include <linux/kfifo.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/kthread.h>
|
|
+#include <linux/ktime.h>
|
|
+#include <linux/limits.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/lockdep.h>
|
|
+#include <linux/serdev.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+#include "ssh_msgb.h"
|
|
+#include "ssh_packet_layer.h"
|
|
+#include "ssh_parser.h"
|
|
+
|
|
+#include "trace.h"
|
|
+
|
|
+/*
|
|
+ * To simplify reasoning about the code below, we define a few concepts. The
|
|
+ * system below is similar to a state-machine for packets, however, there are
|
|
+ * too many states to explicitly write them down. To (somewhat) manage the
|
|
+ * states and packets we rely on flags, reference counting, and some simple
|
|
+ * concepts. State transitions are triggered by actions.
|
|
+ *
|
|
+ * >> Actions <<
|
|
+ *
|
|
+ * - submit
|
|
+ * - transmission start (process next item in queue)
|
|
+ * - transmission finished (guaranteed to never be parallel to transmission
|
|
+ * start)
|
|
+ * - ACK received
|
|
+ * - NAK received (this is equivalent to issuing re-submit for all pending
|
|
+ * packets)
|
|
+ * - timeout (this is equivalent to re-issuing a submit or canceling)
|
|
+ * - cancel (non-pending and pending)
|
|
+ *
|
|
+ * >> Data Structures, Packet Ownership, General Overview <<
|
|
+ *
|
|
+ * The code below employs two main data structures: The packet queue,
|
|
+ * containing all packets scheduled for transmission, and the set of pending
|
|
+ * packets, containing all packets awaiting an ACK.
|
|
+ *
|
|
+ * Shared ownership of a packet is controlled via reference counting. Inside
|
|
+ * the transport system are a total of five packet owners:
|
|
+ *
|
|
+ * - the packet queue,
|
|
+ * - the pending set,
|
|
+ * - the transmitter thread,
|
|
+ * - the receiver thread (via ACKing), and
|
|
+ * - the timeout work item.
|
|
+ *
|
|
+ * Normal operation is as follows: The initial reference of the packet is
|
|
+ * obtained by submitting the packet and queuing it. The receiver thread takes
|
|
+ * packets from the queue. By doing this, it does not increment the refcount
|
|
+ * but takes over the reference (removing it from the queue). If the packet is
|
|
+ * sequenced (i.e. needs to be ACKed by the client), the transmitter thread
|
|
+ * sets-up the timeout and adds the packet to the pending set before starting
|
|
+ * to transmit it. As the timeout is handled by a reaper task, no additional
|
|
+ * reference for it is needed. After the transmit is done, the reference held
|
|
+ * by the transmitter thread is dropped. If the packet is unsequenced (i.e.
|
|
+ * does not need an ACK), the packet is completed by the transmitter thread
|
|
+ * before dropping that reference.
|
|
+ *
|
|
+ * On receival of an ACK, the receiver thread removes and obtains the
|
|
+ * reference to the packet from the pending set. The receiver thread will then
|
|
+ * complete the packet and drop its reference.
|
|
+ *
|
|
+ * On receival of a NAK, the receiver thread re-submits all currently pending
|
|
+ * packets.
|
|
+ *
|
|
+ * Packet timeouts are detected by the timeout reaper. This is a task,
|
|
+ * scheduled depending on the earliest packet timeout expiration date,
|
|
+ * checking all currently pending packets if their timeout has expired. If the
|
|
+ * timeout of a packet has expired, it is re-submitted and the number of tries
|
|
+ * of this packet is incremented. If this number reaches its limit, the packet
|
|
+ * will be completed with a failure.
|
|
+ *
|
|
+ * On transmission failure (such as repeated packet timeouts), the completion
|
|
+ * callback is immediately run by on thread on which the error was detected.
|
|
+ *
|
|
+ * To ensure that a packet eventually leaves the system it is marked as
|
|
+ * "locked" directly before it is going to be completed or when it is
|
|
+ * canceled. Marking a packet as "locked" has the effect that passing and
|
|
+ * creating new references of the packet is disallowed. This means that the
|
|
+ * packet cannot be added to the queue, the pending set, and the timeout, or
|
|
+ * be picked up by the transmitter thread or receiver thread. To remove a
|
|
+ * packet from the system it has to be marked as locked and subsequently all
|
|
+ * references from the data structures (queue, pending) have to be removed.
|
|
+ * References held by threads will eventually be dropped automatically as
|
|
+ * their execution progresses.
|
|
+ *
|
|
+ * Note that the packet completion callback is, in case of success and for a
|
|
+ * sequenced packet, guaranteed to run on the receiver thread, thus providing
|
|
+ * a way to reliably identify responses to the packet. The packet completion
|
|
+ * callback is only run once and it does not indicate that the packet has
|
|
+ * fully left the system (for this, one should rely on the release method,
|
|
+ * triggered when the reference count of the packet reaches zero). In case of
|
|
+ * re-submission (and with somewhat unlikely timing), it may be possible that
|
|
+ * the packet is being re-transmitted while the completion callback runs.
|
|
+ * Completion will occur both on success and internal error, as well as when
|
|
+ * the packet is canceled.
|
|
+ *
|
|
+ * >> Flags <<
|
|
+ *
|
|
+ * Flags are used to indicate the state and progression of a packet. Some flags
|
|
+ * have stricter guarantees than other:
|
|
+ *
|
|
+ * - locked
|
|
+ * Indicates if the packet is locked. If the packet is locked, passing and/or
|
|
+ * creating additional references to the packet is forbidden. The packet thus
|
|
+ * may not be queued, dequeued, or removed or added to the pending set. Note
|
|
+ * that the packet state flags may still change (e.g. it may be marked as
|
|
+ * ACKed, transmitted, ...).
|
|
+ *
|
|
+ * - completed
|
|
+ * Indicates if the packet completion callback has been executed or is about
|
|
+ * to be executed. This flag is used to ensure that the packet completion
|
|
+ * callback is only run once.
|
|
+ *
|
|
+ * - queued
|
|
+ * Indicates if a packet is present in the submission queue or not. This flag
|
|
+ * must only be modified with the queue lock held, and must be coherent to the
|
|
+ * presence of the packet in the queue.
|
|
+ *
|
|
+ * - pending
|
|
+ * Indicates if a packet is present in the set of pending packets or not.
|
|
+ * This flag must only be modified with the pending lock held, and must be
|
|
+ * coherent to the presence of the packet in the pending set.
|
|
+ *
|
|
+ * - transmitting
|
|
+ * Indicates if the packet is currently transmitting. In case of
|
|
+ * re-transmissions, it is only safe to wait on the "transmitted" completion
|
|
+ * after this flag has been set. The completion will be set both in success
|
|
+ * and error case.
|
|
+ *
|
|
+ * - transmitted
|
|
+ * Indicates if the packet has been transmitted. This flag is not cleared by
|
|
+ * the system, thus it indicates the first transmission only.
|
|
+ *
|
|
+ * - acked
|
|
+ * Indicates if the packet has been acknowledged by the client. There are no
|
|
+ * other guarantees given. For example, the packet may still be canceled
|
|
+ * and/or the completion may be triggered an error even though this bit is
|
|
+ * set. Rely on the status provided to the completion callback instead.
|
|
+ *
|
|
+ * - canceled
|
|
+ * Indicates if the packet has been canceled from the outside. There are no
|
|
+ * other guarantees given. Specifically, the packet may be completed by
|
|
+ * another part of the system before the cancellation attempts to complete it.
|
|
+ *
|
|
+ * >> General Notes <<
|
|
+ *
|
|
+ * - To avoid deadlocks, if both queue and pending locks are required, the
|
|
+ * pending lock must be acquired before the queue lock.
|
|
+ *
|
|
+ * - The packet priority must be accessed only while holding the queue lock.
|
|
+ *
|
|
+ * - The packet timestamp must be accessed only while holding the pending
|
|
+ * lock.
|
|
+ */
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
|
|
+ *
|
|
+ * Maximum number of transmission attempts per sequenced packet in case of
|
|
+ * time-outs. Must be smaller than 16. If the packet times out after this
|
|
+ * amount of tries, the packet will be completed with %-ETIMEDOUT as status
|
|
+ * code.
|
|
+ */
|
|
+#define SSH_PTL_MAX_PACKET_TRIES 3
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
|
|
+ *
|
|
+ * Timeout in jiffies for packet transmission via the underlying serial
|
|
+ * device. If transmitting the packet takes longer than this timeout, the
|
|
+ * packet will be completed with -ETIMEDOUT. It will not be re-submitted.
|
|
+ */
|
|
+#define SSH_PTL_TX_TIMEOUT HZ
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
|
|
+ *
|
|
+ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
|
|
+ * time-frame after starting transmission, the packet will be re-submitted.
|
|
+ */
|
|
+#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
|
|
+ *
|
|
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
|
|
+ * direct re-scheduling of reaper work_struct.
|
|
+ */
|
|
+#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_MAX_PENDING - Maximum number of pending packets.
|
|
+ *
|
|
+ * Maximum number of sequenced packets concurrently waiting for an ACK.
|
|
+ * Packets marked as blocking will not be transmitted while this limit is
|
|
+ * reached.
|
|
+ */
|
|
+#define SSH_PTL_MAX_PENDING 1
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
|
|
+ */
|
|
+#define SSH_PTL_RX_BUF_LEN 4096
|
|
+
|
|
+/*
|
|
+ * SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
|
|
+ */
|
|
+#define SSH_PTL_RX_FIFO_LEN 4096
|
|
+
|
|
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
|
|
+ *
|
|
+ * Useful to test detection and handling of automated re-transmits by the EC.
|
|
+ * Specifically of packets that the EC considers not-ACKed but the driver
|
|
+ * already considers ACKed (due to dropped ACK). In this case, the EC
|
|
+ * re-transmits the packet-to-be-ACKed and the driver should detect it as
|
|
+ * duplicate/already handled. Note that the driver should still send an ACK
|
|
+ * for the re-transmitted packet.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_drop_ack_packet(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
|
|
+ *
|
|
+ * Useful to test/force automated (timeout-based) re-transmit by the EC.
|
|
+ * Specifically, packets that have not reached the driver completely/with valid
|
|
+ * checksums. Only useful in combination with receival of (injected) bad data.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_drop_nak_packet(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
|
|
+ * data packet.
|
|
+ *
|
|
+ * Useful to test re-transmit timeout of the driver. If the data packet has not
|
|
+ * been ACKed after a certain time, the driver should re-transmit the packet up
|
|
+ * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_drop_dsq_packet(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_fail_write() - Error injection hook to make
|
|
+ * serdev_device_write() fail.
|
|
+ *
|
|
+ * Hook to simulate errors in serdev_device_write when transmitting packets.
|
|
+ */
|
|
+static noinline int ssh_ptl_should_fail_write(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
|
|
+ * data being sent to the EC.
|
|
+ *
|
|
+ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
|
|
+ * Causes the packet data to be actively corrupted by overwriting it with
|
|
+ * pre-defined values, such that it becomes invalid, causing the EC to respond
|
|
+ * with a NAK packet. Useful to test handling of NAK packets received by the
|
|
+ * driver.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_corrupt_tx_data(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
|
|
+ * data being sent by the EC.
|
|
+ *
|
|
+ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
|
|
+ * test handling thereof in the driver.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
|
|
+ * data being sent by the EC.
|
|
+ *
|
|
+ * Hook to simulate invalid data/checksum of the message frame and test handling
|
|
+ * thereof in the driver.
|
|
+ */
|
|
+static noinline bool ssh_ptl_should_corrupt_rx_data(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
|
|
+
|
|
+static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
|
|
+{
|
|
+ if (likely(!ssh_ptl_should_drop_ack_packet()))
|
|
+ return false;
|
|
+
|
|
+ trace_ssam_ei_tx_drop_ack_packet(packet);
|
|
+ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
|
|
+ packet);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
|
|
+{
|
|
+ if (likely(!ssh_ptl_should_drop_nak_packet()))
|
|
+ return false;
|
|
+
|
|
+ trace_ssam_ei_tx_drop_nak_packet(packet);
|
|
+ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
|
|
+ packet);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
|
|
+{
|
|
+ if (likely(!ssh_ptl_should_drop_dsq_packet()))
|
|
+ return false;
|
|
+
|
|
+ trace_ssam_ei_tx_drop_dsq_packet(packet);
|
|
+ ptl_info(packet->ptl,
|
|
+ "packet error injection: dropping sequenced data packet %p\n",
|
|
+ packet);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
|
|
+{
|
|
+ /* Ignore packets that don't carry any data (i.e. flush). */
|
|
+ if (!packet->data.ptr || !packet->data.len)
|
|
+ return false;
|
|
+
|
|
+ switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
|
|
+ case SSH_FRAME_TYPE_ACK:
|
|
+ return __ssh_ptl_should_drop_ack_packet(packet);
|
|
+
|
|
+ case SSH_FRAME_TYPE_NAK:
|
|
+ return __ssh_ptl_should_drop_nak_packet(packet);
|
|
+
|
|
+ case SSH_FRAME_TYPE_DATA_SEQ:
|
|
+ return __ssh_ptl_should_drop_dsq_packet(packet);
|
|
+
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
|
|
+ const unsigned char *buf, size_t count)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssh_ptl_should_fail_write();
|
|
+ if (unlikely(status)) {
|
|
+ trace_ssam_ei_tx_fail_write(packet, status);
|
|
+ ptl_info(packet->ptl,
|
|
+ "packet error injection: simulating transmit error %d, packet %p\n",
|
|
+ status, packet);
|
|
+
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ return serdev_device_write_buf(ptl->serdev, buf, count);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
|
|
+{
|
|
+ /* Ignore packets that don't carry any data (i.e. flush). */
|
|
+ if (!packet->data.ptr || !packet->data.len)
|
|
+ return;
|
|
+
|
|
+ /* Only allow sequenced data packets to be modified. */
|
|
+ if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
|
|
+ return;
|
|
+
|
|
+ if (likely(!ssh_ptl_should_corrupt_tx_data()))
|
|
+ return;
|
|
+
|
|
+ trace_ssam_ei_tx_corrupt_data(packet);
|
|
+ ptl_info(packet->ptl,
|
|
+ "packet error injection: simulating invalid transmit data on packet %p\n",
|
|
+ packet);
|
|
+
|
|
+ /*
|
|
+ * NB: The value 0xb3 has been chosen more or less randomly so that it
|
|
+ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is
|
|
+ * non-trivial (i.e. non-zero, non-0xff).
|
|
+ */
|
|
+ memset(packet->data.ptr, 0xb3, packet->data.len);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
|
|
+ struct ssam_span *data)
|
|
+{
|
|
+ struct ssam_span frame;
|
|
+
|
|
+ /* Check if there actually is something to corrupt. */
|
|
+ if (!sshp_find_syn(data, &frame))
|
|
+ return;
|
|
+
|
|
+ if (likely(!ssh_ptl_should_corrupt_rx_syn()))
|
|
+ return;
|
|
+
|
|
+ trace_ssam_ei_rx_corrupt_syn(data->len);
|
|
+
|
|
+ data->ptr[1] = 0xb3; /* Set second byte of SYN to "random" value. */
|
|
+}
|
|
+
|
|
+static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
|
|
+ struct ssam_span *frame)
|
|
+{
|
|
+ size_t payload_len, message_len;
|
|
+ struct ssh_frame *sshf;
|
|
+
|
|
+ /* Ignore incomplete messages, will get handled once it's complete. */
|
|
+ if (frame->len < SSH_MESSAGE_LENGTH(0))
|
|
+ return;
|
|
+
|
|
+ /* Ignore incomplete messages, part 2. */
|
|
+ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
|
|
+ message_len = SSH_MESSAGE_LENGTH(payload_len);
|
|
+ if (frame->len < message_len)
|
|
+ return;
|
|
+
|
|
+ if (likely(!ssh_ptl_should_corrupt_rx_data()))
|
|
+ return;
|
|
+
|
|
+ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
|
|
+ trace_ssam_ei_rx_corrupt_data(sshf);
|
|
+
|
|
+ /*
|
|
+ * Flip bits in first byte of payload checksum. This is basically
|
|
+ * equivalent to a payload/frame data error without us having to worry
|
|
+ * about (the, arguably pretty small, probability of) accidental
|
|
+ * checksum collisions.
|
|
+ */
|
|
+ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
|
|
+}
|
|
+
|
|
+#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
|
|
+
|
|
+static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
|
|
+ struct ssh_packet *packet,
|
|
+ const unsigned char *buf,
|
|
+ size_t count)
|
|
+{
|
|
+ return serdev_device_write_buf(ptl->serdev, buf, count);
|
|
+}
|
|
+
|
|
+static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
|
|
+{
|
|
+}
|
|
+
|
|
+static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
|
|
+ struct ssam_span *data)
|
|
+{
|
|
+}
|
|
+
|
|
+static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
|
|
+ struct ssam_span *frame)
|
|
+{
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
|
|
+
|
|
+static void __ssh_ptl_packet_release(struct kref *kref)
|
|
+{
|
|
+ struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
|
|
+
|
|
+ trace_ssam_packet_release(p);
|
|
+
|
|
+ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
|
|
+ p->ops->release(p);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_packet_get() - Increment reference count of packet.
|
|
+ * @packet: The packet to increment the reference count of.
|
|
+ *
|
|
+ * Increments the reference count of the given packet. See ssh_packet_put()
|
|
+ * for the counter-part of this function.
|
|
+ *
|
|
+ * Return: Returns the packet provided as input.
|
|
+ */
|
|
+struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
|
|
+{
|
|
+ if (packet)
|
|
+ kref_get(&packet->refcnt);
|
|
+ return packet;
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssh_packet_get);
|
|
+
|
|
+/**
|
|
+ * ssh_packet_put() - Decrement reference count of packet.
|
|
+ * @packet: The packet to decrement the reference count of.
|
|
+ *
|
|
+ * If the reference count reaches zero, the ``release`` callback specified in
|
|
+ * the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
|
|
+ * called.
|
|
+ *
|
|
+ * See ssh_packet_get() for the counter-part of this function.
|
|
+ */
|
|
+void ssh_packet_put(struct ssh_packet *packet)
|
|
+{
|
|
+ if (packet)
|
|
+ kref_put(&packet->refcnt, __ssh_ptl_packet_release);
|
|
+}
|
|
+EXPORT_SYMBOL_GPL(ssh_packet_put);
|
|
+
|
|
+static u8 ssh_packet_get_seq(struct ssh_packet *packet)
|
|
+{
|
|
+ return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_packet_init() - Initialize SSH packet.
|
|
+ * @packet: The packet to initialize.
|
|
+ * @type: Type-flags of the packet.
|
|
+ * @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
|
|
+ * @ops: Packet operations.
|
|
+ *
|
|
+ * Initializes the given SSH packet. Sets the transmission buffer pointer to
|
|
+ * %NULL and the transmission buffer length to zero. For data-type packets,
|
|
+ * this buffer has to be set separately via ssh_packet_set_data() before
|
|
+ * submission, and must contain a valid SSH message, i.e. frame with optional
|
|
+ * payload of any type.
|
|
+ */
|
|
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
|
|
+ u8 priority, const struct ssh_packet_ops *ops)
|
|
+{
|
|
+ kref_init(&packet->refcnt);
|
|
+
|
|
+ packet->ptl = NULL;
|
|
+ INIT_LIST_HEAD(&packet->queue_node);
|
|
+ INIT_LIST_HEAD(&packet->pending_node);
|
|
+
|
|
+ packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
|
|
+ packet->priority = priority;
|
|
+ packet->timestamp = KTIME_MAX;
|
|
+
|
|
+ packet->data.ptr = NULL;
|
|
+ packet->data.len = 0;
|
|
+
|
|
+ packet->ops = ops;
|
|
+}
|
|
+
|
|
+static struct kmem_cache *ssh_ctrl_packet_cache;
|
|
+
|
|
+/**
|
|
+ * ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
|
|
+ */
|
|
+int ssh_ctrl_packet_cache_init(void)
|
|
+{
|
|
+ const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
|
|
+ const unsigned int align = __alignof__(struct ssh_packet);
|
|
+ struct kmem_cache *cache;
|
|
+
|
|
+ cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
|
|
+ if (!cache)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ssh_ctrl_packet_cache = cache;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
|
|
+ */
|
|
+void ssh_ctrl_packet_cache_destroy(void)
|
|
+{
|
|
+ kmem_cache_destroy(ssh_ctrl_packet_cache);
|
|
+ ssh_ctrl_packet_cache = NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
|
|
+ * @packet: Where the pointer to the newly allocated packet should be stored.
|
|
+ * @buffer: The buffer corresponding to this packet.
|
|
+ * @flags: Flags used for allocation.
|
|
+ *
|
|
+ * Allocates a packet and corresponding transport buffer from the control
|
|
+ * packet cache. Sets the packet's buffer reference to the allocated buffer.
|
|
+ * The packet must be freed via ssh_ctrl_packet_free(), which will also free
|
|
+ * the corresponding buffer. The corresponding buffer must not be freed
|
|
+ * separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
|
|
+ * operations.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ENOMEM if the allocation failed.
|
|
+ */
|
|
+static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
|
|
+ struct ssam_span *buffer, gfp_t flags)
|
|
+{
|
|
+ *packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
|
|
+ if (!*packet)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ buffer->ptr = (u8 *)(*packet + 1);
|
|
+ buffer->len = SSH_MSG_LEN_CTRL;
|
|
+
|
|
+ trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
|
|
+ * @p: The packet to free.
|
|
+ */
|
|
+static void ssh_ctrl_packet_free(struct ssh_packet *p)
|
|
+{
|
|
+ trace_ssam_ctrl_packet_free(p);
|
|
+ kmem_cache_free(ssh_ctrl_packet_cache, p);
|
|
+}
|
|
+
|
|
+static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
|
|
+ .complete = NULL,
|
|
+ .release = ssh_ctrl_packet_free,
|
|
+};
|
|
+
|
|
+static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
|
|
+ ktime_t expires)
|
|
+{
|
|
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
|
|
+ ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
|
|
+
|
|
+ spin_lock(&ptl->rtx_timeout.lock);
|
|
+
|
|
+ /* Re-adjust / schedule reaper only if it is above resolution delta. */
|
|
+ if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
|
|
+ ptl->rtx_timeout.expires = expires;
|
|
+ mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&ptl->rtx_timeout.lock);
|
|
+}
|
|
+
|
|
+/* Must be called with queue lock held. */
|
|
+static void ssh_packet_next_try(struct ssh_packet *p)
|
|
+{
|
|
+ u8 base = ssh_packet_priority_get_base(p->priority);
|
|
+ u8 try = ssh_packet_priority_get_try(p->priority);
|
|
+
|
|
+ lockdep_assert_held(&p->ptl->queue.lock);
|
|
+
|
|
+ /*
|
|
+ * Ensure that we write the priority in one go via WRITE_ONCE() so we
|
|
+ * can access it via READ_ONCE() for tracing. Note that other access
|
|
+ * is guarded by the queue lock, so no need to use READ_ONCE() there.
|
|
+ */
|
|
+ WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
|
|
+}
|
|
+
|
|
+/* Must be called with queue lock held. */
|
|
+static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
|
|
+{
|
|
+ struct list_head *head;
|
|
+ struct ssh_packet *q;
|
|
+
|
|
+ lockdep_assert_held(&p->ptl->queue.lock);
|
|
+
|
|
+ /*
|
|
+ * We generally assume that there are less control (ACK/NAK) packets
|
|
+ * and re-submitted data packets as there are normal data packets (at
|
|
+ * least in situations in which many packets are queued; if there
|
|
+ * aren't many packets queued the decision on how to iterate should be
|
|
+ * basically irrelevant; the number of control/data packets is more or
|
|
+ * less limited via the maximum number of pending packets). Thus, when
|
|
+ * inserting a control or re-submitted data packet, (determined by
|
|
+ * their priority), we search from front to back. Normal data packets
|
|
+ * are, usually queued directly at the tail of the queue, so for those
|
|
+ * search from back to front.
|
|
+ */
|
|
+
|
|
+ if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
|
|
+ list_for_each(head, &p->ptl->queue.head) {
|
|
+ q = list_entry(head, struct ssh_packet, queue_node);
|
|
+
|
|
+ if (q->priority < p->priority)
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ list_for_each_prev(head, &p->ptl->queue.head) {
|
|
+ q = list_entry(head, struct ssh_packet, queue_node);
|
|
+
|
|
+ if (q->priority >= p->priority) {
|
|
+ head = head->next;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return head;
|
|
+}
|
|
+
|
|
+/* Must be called with queue lock held. */
|
|
+static int __ssh_ptl_queue_push(struct ssh_packet *packet)
|
|
+{
|
|
+ struct ssh_ptl *ptl = packet->ptl;
|
|
+ struct list_head *head;
|
|
+
|
|
+ lockdep_assert_held(&ptl->queue.lock);
|
|
+
|
|
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ /* Avoid further transitions when canceling/completing. */
|
|
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
|
|
+ return -EINVAL;
|
|
+
|
|
+ /* If this packet has already been queued, do not add it. */
|
|
+ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
|
|
+ return -EALREADY;
|
|
+
|
|
+ head = __ssh_ptl_queue_find_entrypoint(packet);
|
|
+
|
|
+ list_add_tail(&ssh_packet_get(packet)->queue_node, head);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssh_ptl_queue_push(struct ssh_packet *packet)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ spin_lock(&packet->ptl->queue.lock);
|
|
+ status = __ssh_ptl_queue_push(packet);
|
|
+ spin_unlock(&packet->ptl->queue.lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_queue_remove(struct ssh_packet *packet)
|
|
+{
|
|
+ struct ssh_ptl *ptl = packet->ptl;
|
|
+
|
|
+ spin_lock(&ptl->queue.lock);
|
|
+
|
|
+ if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
|
|
+ spin_unlock(&ptl->queue.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ list_del(&packet->queue_node);
|
|
+
|
|
+ spin_unlock(&ptl->queue.lock);
|
|
+ ssh_packet_put(packet);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_pending_push(struct ssh_packet *p)
|
|
+{
|
|
+ struct ssh_ptl *ptl = p->ptl;
|
|
+ const ktime_t timestamp = ktime_get_coarse_boottime();
|
|
+ const ktime_t timeout = ptl->rtx_timeout.timeout;
|
|
+
|
|
+ /*
|
|
+ * Note: We can get the time for the timestamp before acquiring the
|
|
+ * lock as this is the only place we're setting it and this function
|
|
+ * is called only from the transmitter thread. Thus it is not possible
|
|
+ * to overwrite the timestamp with an outdated value below.
|
|
+ */
|
|
+
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+
|
|
+ /* If we are canceling/completing this packet, do not add it. */
|
|
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * On re-submission, the packet has already been added the pending
|
|
+ * set. We still need to update the timestamp as the packet timeout is
|
|
+ * reset for each (re-)submission.
|
|
+ */
|
|
+ p->timestamp = timestamp;
|
|
+
|
|
+ /* In case it is already pending (e.g. re-submission), do not add it. */
|
|
+ if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
|
|
+ atomic_inc(&ptl->pending.count);
|
|
+ list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ /* Arm/update timeout reaper. */
|
|
+ ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_pending_remove(struct ssh_packet *packet)
|
|
+{
|
|
+ struct ssh_ptl *ptl = packet->ptl;
|
|
+
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+
|
|
+ if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ list_del(&packet->pending_node);
|
|
+ atomic_dec(&ptl->pending.count);
|
|
+
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ ssh_packet_put(packet);
|
|
+}
|
|
+
|
|
+/* Warning: Does not check/set "completed" bit. */
|
|
+static void __ssh_ptl_complete(struct ssh_packet *p, int status)
|
|
+{
|
|
+ struct ssh_ptl *ptl = READ_ONCE(p->ptl);
|
|
+
|
|
+ trace_ssam_packet_complete(p, status);
|
|
+ ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
|
|
+
|
|
+ if (p->ops->complete)
|
|
+ p->ops->complete(p, status);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
|
|
+{
|
|
+ /*
|
|
+ * A call to this function should in general be preceded by
|
|
+ * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->flags) to avoid re-adding the
|
|
+ * packet to the structures it's going to be removed from.
|
|
+ *
|
|
+ * The set_bit call does not need explicit memory barriers as the
|
|
+ * implicit barrier of the test_and_set_bit() call below ensure that the
|
|
+ * flag is visible before we actually attempt to remove the packet.
|
|
+ */
|
|
+
|
|
+ if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
|
|
+ return;
|
|
+
|
|
+ ssh_ptl_queue_remove(p);
|
|
+ ssh_ptl_pending_remove(p);
|
|
+
|
|
+ __ssh_ptl_complete(p, status);
|
|
+}
|
|
+
|
|
+static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
|
|
+{
|
|
+ struct ssh_ptl *ptl = packet->ptl;
|
|
+
|
|
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
|
|
+ return !atomic_read(&ptl->pending.count);
|
|
+
|
|
+ /* We can always process non-blocking packets. */
|
|
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
|
|
+ return true;
|
|
+
|
|
+ /* If we are already waiting for this packet, send it again. */
|
|
+ if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
|
|
+ return true;
|
|
+
|
|
+ /* Otherwise: Check if we have the capacity to send. */
|
|
+ return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
|
|
+}
|
|
+
|
|
+static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
|
|
+{
|
|
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
|
|
+ struct ssh_packet *p, *n;
|
|
+
|
|
+ spin_lock(&ptl->queue.lock);
|
|
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
|
|
+ /*
|
|
+ * If we are canceling or completing this packet, ignore it.
|
|
+ * It's going to be removed from this queue shortly.
|
|
+ */
|
|
+ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * Packets should be ordered non-blocking/to-be-resent first.
|
|
+ * If we cannot process this packet, assume that we can't
|
|
+ * process any following packet either and abort.
|
|
+ */
|
|
+ if (!ssh_ptl_tx_can_process(p)) {
|
|
+ packet = ERR_PTR(-EBUSY);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * We are allowed to change the state now. Remove it from the
|
|
+ * queue and mark it as being transmitted.
|
|
+ */
|
|
+
|
|
+ list_del(&p->queue_node);
|
|
+
|
|
+ set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
|
|
+ /* Ensure that state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
|
|
+
|
|
+ /*
|
|
+ * Update number of tries. This directly influences the
|
|
+ * priority in case the packet is re-submitted (e.g. via
|
|
+ * timeout/NAK). Note that all reads and writes to the
|
|
+ * priority after the first submission are guarded by the
|
|
+ * queue lock.
|
|
+ */
|
|
+ ssh_packet_next_try(p);
|
|
+
|
|
+ packet = p;
|
|
+ break;
|
|
+ }
|
|
+ spin_unlock(&ptl->queue.lock);
|
|
+
|
|
+ return packet;
|
|
+}
|
|
+
|
|
+static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
|
|
+{
|
|
+ struct ssh_packet *p;
|
|
+
|
|
+ p = ssh_ptl_tx_pop(ptl);
|
|
+ if (IS_ERR(p))
|
|
+ return p;
|
|
+
|
|
+ if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
|
|
+ ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
|
|
+ ssh_ptl_pending_push(p);
|
|
+ } else {
|
|
+ ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
|
|
+ }
|
|
+
|
|
+ return p;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
|
|
+{
|
|
+ struct ssh_ptl *ptl = packet->ptl;
|
|
+
|
|
+ ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
|
|
+
|
|
+ /* Transition state to "transmitted". */
|
|
+ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
|
|
+ /* Ensure that state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
|
|
+
|
|
+ /* If the packet is unsequenced, we're done: Lock and complete. */
|
|
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
|
|
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
|
|
+ ssh_ptl_remove_and_complete(packet, 0);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Notify that a packet transmission has finished. In general we're only
|
|
+ * waiting for one packet (if any), so wake_up_all should be fine.
|
|
+ */
|
|
+ wake_up_all(&ptl->tx.packet_wq);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
|
|
+{
|
|
+ /* Transmission failure: Lock the packet and try to complete it. */
|
|
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
|
|
+ /* Ensure that state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
|
|
+
|
|
+ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
|
|
+ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
|
|
+
|
|
+ ssh_ptl_remove_and_complete(packet, status);
|
|
+
|
|
+ /*
|
|
+ * Notify that a packet transmission has finished. In general we're only
|
|
+ * waiting for one packet (if any), so wake_up_all should be fine.
|
|
+ */
|
|
+ wake_up_all(&packet->ptl->tx.packet_wq);
|
|
+}
|
|
+
|
|
+static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
|
|
+ reinit_completion(&ptl->tx.thread_cplt_pkt);
|
|
+
|
|
+ /*
|
|
+ * Ensure completion is cleared before continuing to avoid lost update
|
|
+ * problems.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
|
|
+{
|
|
+ long status;
|
|
+
|
|
+ status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
|
|
+ timeout);
|
|
+ reinit_completion(&ptl->tx.thread_cplt_tx);
|
|
+
|
|
+ /*
|
|
+ * Ensure completion is cleared before continuing to avoid lost update
|
|
+ * problems.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
|
|
+{
|
|
+ long timeout = SSH_PTL_TX_TIMEOUT;
|
|
+ size_t offset = 0;
|
|
+
|
|
+ /* Note: Flush-packets don't have any data. */
|
|
+ if (unlikely(!packet->data.ptr))
|
|
+ return 0;
|
|
+
|
|
+ /* Error injection: drop packet to simulate transmission problem. */
|
|
+ if (ssh_ptl_should_drop_packet(packet))
|
|
+ return 0;
|
|
+
|
|
+ /* Error injection: simulate invalid packet data. */
|
|
+ ssh_ptl_tx_inject_invalid_data(packet);
|
|
+
|
|
+ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
|
|
+ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
|
|
+ packet->data.ptr, packet->data.len, false);
|
|
+
|
|
+ do {
|
|
+ ssize_t status, len;
|
|
+ u8 *buf;
|
|
+
|
|
+ buf = packet->data.ptr + offset;
|
|
+ len = packet->data.len - offset;
|
|
+
|
|
+ status = ssh_ptl_write_buf(ptl, packet, buf, len);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ if (status == len)
|
|
+ return 0;
|
|
+
|
|
+ offset += status;
|
|
+
|
|
+ timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
|
|
+ if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ if (timeout < 0)
|
|
+ return -EINTR;
|
|
+
|
|
+ if (timeout == 0)
|
|
+ return -ETIMEDOUT;
|
|
+ } while (true);
|
|
+}
|
|
+
|
|
+static int ssh_ptl_tx_threadfn(void *data)
|
|
+{
|
|
+ struct ssh_ptl *ptl = data;
|
|
+
|
|
+ while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
|
|
+ struct ssh_packet *packet;
|
|
+ int status;
|
|
+
|
|
+ /* Try to get the next packet. */
|
|
+ packet = ssh_ptl_tx_next(ptl);
|
|
+
|
|
+ /* If no packet can be processed, we are done. */
|
|
+ if (IS_ERR(packet)) {
|
|
+ ssh_ptl_tx_wait_packet(ptl);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Transfer and complete packet. */
|
|
+ status = ssh_ptl_tx_packet(ptl, packet);
|
|
+ if (status)
|
|
+ ssh_ptl_tx_compl_error(packet, status);
|
|
+ else
|
|
+ ssh_ptl_tx_compl_success(packet);
|
|
+
|
|
+ ssh_packet_put(packet);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
|
|
+ * packet.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Wakes up the packet transmitter thread, notifying it that a new packet has
|
|
+ * arrived and is ready for transfer. If the packet transport layer has been
|
|
+ * shut down, calls to this function will be ignored.
|
|
+ */
|
|
+static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
|
|
+{
|
|
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
|
|
+ return;
|
|
+
|
|
+ complete(&ptl->tx.thread_cplt_pkt);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_tx_start() - Start packet transmitter thread.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssh_ptl_tx_start(struct ssh_ptl *ptl)
|
|
+{
|
|
+ atomic_set_release(&ptl->tx.running, 1);
|
|
+
|
|
+ ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
|
|
+ if (IS_ERR(ptl->tx.thread))
|
|
+ return PTR_ERR(ptl->tx.thread);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_tx_stop() - Stop packet transmitter thread.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
|
|
+{
|
|
+ int status = 0;
|
|
+
|
|
+ if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
|
|
+ /* Tell thread to stop. */
|
|
+ atomic_set_release(&ptl->tx.running, 0);
|
|
+
|
|
+ /*
|
|
+ * Wake up thread in case it is paused. Do not use wakeup
|
|
+ * helpers as this may be called when the shutdown bit has
|
|
+ * already been set.
|
|
+ */
|
|
+ complete(&ptl->tx.thread_cplt_pkt);
|
|
+ complete(&ptl->tx.thread_cplt_tx);
|
|
+
|
|
+ /* Finally, wait for thread to stop. */
|
|
+ status = kthread_stop(ptl->tx.thread);
|
|
+ ptl->tx.thread = NULL;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
|
|
+{
|
|
+ struct ssh_packet *packet = ERR_PTR(-ENOENT);
|
|
+ struct ssh_packet *p, *n;
|
|
+
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
|
|
+ /*
|
|
+ * We generally expect packets to be in order, so first packet
|
|
+ * to be added to pending is first to be sent, is first to be
|
|
+ * ACKed.
|
|
+ */
|
|
+ if (unlikely(ssh_packet_get_seq(p) != seq_id))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * In case we receive an ACK while handling a transmission
|
|
+ * error completion. The packet will be removed shortly.
|
|
+ */
|
|
+ if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
|
|
+ packet = ERR_PTR(-EPERM);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Mark the packet as ACKed and remove it from pending by
|
|
+ * removing its node and decrementing the pending counter.
|
|
+ */
|
|
+ set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
|
|
+ /* Ensure that state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
|
|
+
|
|
+ atomic_dec(&ptl->pending.count);
|
|
+ list_del(&p->pending_node);
|
|
+ packet = p;
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ return packet;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
|
|
+{
|
|
+ wait_event(packet->ptl->tx.packet_wq,
|
|
+ test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
|
|
+ test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
|
|
+}
|
|
+
|
|
+static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
|
|
+{
|
|
+ struct ssh_packet *p;
|
|
+
|
|
+ p = ssh_ptl_ack_pop(ptl, seq);
|
|
+ if (IS_ERR(p)) {
|
|
+ if (PTR_ERR(p) == -ENOENT) {
|
|
+ /*
|
|
+ * The packet has not been found in the set of pending
|
|
+ * packets.
|
|
+ */
|
|
+ ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
|
|
+ } else {
|
|
+ /*
|
|
+ * The packet is pending, but we are not allowed to take
|
|
+ * it because it has been locked.
|
|
+ */
|
|
+ WARN_ON(PTR_ERR(p) != -EPERM);
|
|
+ }
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
|
|
+
|
|
+ /*
|
|
+ * It is possible that the packet has been transmitted, but the state
|
|
+ * has not been updated from "transmitting" to "transmitted" yet.
|
|
+ * In that case, we need to wait for this transition to occur in order
|
|
+ * to determine between success or failure.
|
|
+ *
|
|
+ * On transmission failure, the packet will be locked after this call.
|
|
+ * On success, the transmitted bit will be set.
|
|
+ */
|
|
+ ssh_ptl_wait_until_transmitted(p);
|
|
+
|
|
+ /*
|
|
+ * The packet will already be locked in case of a transmission error or
|
|
+ * cancellation. Let the transmitter or cancellation issuer complete the
|
|
+ * packet.
|
|
+ */
|
|
+ if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
|
|
+ if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
|
|
+ ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
|
|
+
|
|
+ ssh_packet_put(p);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ssh_ptl_remove_and_complete(p, 0);
|
|
+ ssh_packet_put(p);
|
|
+
|
|
+ if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
|
|
+ ssh_ptl_tx_wakeup_packet(ptl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_submit() - Submit a packet to the transport layer.
|
|
+ * @ptl: The packet transport layer to submit the packet to.
|
|
+ * @p: The packet to submit.
|
|
+ *
|
|
+ * Submits a new packet to the transport layer, queuing it to be sent. This
|
|
+ * function should not be used for re-submission.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-EINVAL if a packet field is invalid or
|
|
+ * the packet has been canceled prior to submission, %-EALREADY if the packet
|
|
+ * has already been submitted, or %-ESHUTDOWN if the packet transport layer
|
|
+ * has been shut down.
|
|
+ */
|
|
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
|
|
+{
|
|
+ struct ssh_ptl *ptl_old;
|
|
+ int status;
|
|
+
|
|
+ trace_ssam_packet_submit(p);
|
|
+
|
|
+ /* Validate packet fields. */
|
|
+ if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
|
|
+ if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
|
|
+ return -EINVAL;
|
|
+ } else if (!p->data.ptr) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * The ptl reference only gets set on or before the first submission.
|
|
+ * After the first submission, it has to be read-only.
|
|
+ *
|
|
+ * Note that ptl may already be set from upper-layer request
|
|
+ * submission, thus we cannot expect it to be NULL.
|
|
+ */
|
|
+ ptl_old = READ_ONCE(p->ptl);
|
|
+ if (!ptl_old)
|
|
+ WRITE_ONCE(p->ptl, ptl);
|
|
+ else if (WARN_ON(ptl_old != ptl))
|
|
+ return -EALREADY; /* Submitted on different PTL. */
|
|
+
|
|
+ status = ssh_ptl_queue_push(p);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
|
|
+ (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
|
|
+ ssh_ptl_tx_wakeup_packet(ptl);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
|
|
+ * @packet: The packet to re-submit.
|
|
+ *
|
|
+ * Re-submits the given packet: Checks if it can be re-submitted and queues it
|
|
+ * if it can, resetting the packet timestamp in the process. Must be called
|
|
+ * with the pending lock held.
|
|
+ *
|
|
+ * Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
|
|
+ * %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
|
|
+ * on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
|
|
+ */
|
|
+static int __ssh_ptl_resubmit(struct ssh_packet *packet)
|
|
+{
|
|
+ int status;
|
|
+ u8 try;
|
|
+
|
|
+ lockdep_assert_held(&packet->ptl->pending.lock);
|
|
+
|
|
+ trace_ssam_packet_resubmit(packet);
|
|
+
|
|
+ spin_lock(&packet->ptl->queue.lock);
|
|
+
|
|
+ /* Check if the packet is out of tries. */
|
|
+ try = ssh_packet_priority_get_try(packet->priority);
|
|
+ if (try >= SSH_PTL_MAX_PACKET_TRIES) {
|
|
+ spin_unlock(&packet->ptl->queue.lock);
|
|
+ return -ECANCELED;
|
|
+ }
|
|
+
|
|
+ status = __ssh_ptl_queue_push(packet);
|
|
+ if (status) {
|
|
+ /*
|
|
+ * An error here indicates that the packet has either already
|
|
+ * been queued, been locked, or the transport layer is being
|
|
+ * shut down. In all cases: Ignore the error.
|
|
+ */
|
|
+ spin_unlock(&packet->ptl->queue.lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ packet->timestamp = KTIME_MAX;
|
|
+
|
|
+ spin_unlock(&packet->ptl->queue.lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
|
|
+{
|
|
+ struct ssh_packet *p;
|
|
+ bool resub = false;
|
|
+
|
|
+ /*
|
|
+ * Note: We deliberately do not remove/attempt to cancel and complete
|
|
+ * packets that are out of tires in this function. The packet will be
|
|
+ * eventually canceled and completed by the timeout. Removing the packet
|
|
+ * here could lead to overly eager cancellation if the packet has not
|
|
+ * been re-transmitted yet but the tries-counter already updated (i.e
|
|
+ * ssh_ptl_tx_next() removed the packet from the queue and updated the
|
|
+ * counter, but re-transmission for the last try has not actually
|
|
+ * started yet).
|
|
+ */
|
|
+
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+
|
|
+ /* Re-queue all pending packets. */
|
|
+ list_for_each_entry(p, &ptl->pending.head, pending_node) {
|
|
+ /*
|
|
+ * Re-submission fails if the packet is out of tries, has been
|
|
+ * locked, is already queued, or the layer is being shut down.
|
|
+ * No need to re-schedule tx-thread in those cases.
|
|
+ */
|
|
+ if (!__ssh_ptl_resubmit(p))
|
|
+ resub = true;
|
|
+ }
|
|
+
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ if (resub)
|
|
+ ssh_ptl_tx_wakeup_packet(ptl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_cancel() - Cancel a packet.
|
|
+ * @p: The packet to cancel.
|
|
+ *
|
|
+ * Cancels a packet. There are no guarantees on when completion and release
|
|
+ * callbacks will be called. This may occur during execution of this function
|
|
+ * or may occur at any point later.
|
|
+ *
|
|
+ * Note that it is not guaranteed that the packet will actually be canceled if
|
|
+ * the packet is concurrently completed by another process. The only guarantee
|
|
+ * of this function is that the packet will be completed (with success,
|
|
+ * failure, or cancellation) and released from the transport layer in a
|
|
+ * reasonable time-frame.
|
|
+ *
|
|
+ * May be called before the packet has been submitted, in which case any later
|
|
+ * packet submission fails.
|
|
+ */
|
|
+void ssh_ptl_cancel(struct ssh_packet *p)
|
|
+{
|
|
+ if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
|
|
+ return;
|
|
+
|
|
+ trace_ssam_packet_cancel(p);
|
|
+
|
|
+ /*
|
|
+ * Lock packet and commit with memory barrier. If this packet has
|
|
+ * already been locked, it's going to be removed and completed by
|
|
+ * another party, which should have precedence.
|
|
+ */
|
|
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * By marking the packet as locked and employing the implicit memory
|
|
+ * barrier of test_and_set_bit, we have guaranteed that, at this point,
|
|
+ * the packet cannot be added to the queue any more.
|
|
+ *
|
|
+ * In case the packet has never been submitted, packet->ptl is NULL. If
|
|
+ * the packet is currently being submitted, packet->ptl may be NULL or
|
|
+ * non-NULL. Due marking the packet as locked above and committing with
|
|
+ * the memory barrier, we have guaranteed that, if packet->ptl is NULL,
|
|
+ * the packet will never be added to the queue. If packet->ptl is
|
|
+ * non-NULL, we don't have any guarantees.
|
|
+ */
|
|
+
|
|
+ if (READ_ONCE(p->ptl)) {
|
|
+ ssh_ptl_remove_and_complete(p, -ECANCELED);
|
|
+
|
|
+ if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
|
|
+ ssh_ptl_tx_wakeup_packet(p->ptl);
|
|
+
|
|
+ } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
|
|
+ __ssh_ptl_complete(p, -ECANCELED);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Must be called with pending lock held */
|
|
+static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
|
|
+{
|
|
+ lockdep_assert_held(&p->ptl->pending.lock);
|
|
+
|
|
+ if (p->timestamp != KTIME_MAX)
|
|
+ return ktime_add(p->timestamp, timeout);
|
|
+ else
|
|
+ return KTIME_MAX;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_timeout_reap(struct work_struct *work)
|
|
+{
|
|
+ struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
|
|
+ struct ssh_packet *p, *n;
|
|
+ LIST_HEAD(claimed);
|
|
+ ktime_t now = ktime_get_coarse_boottime();
|
|
+ ktime_t timeout = ptl->rtx_timeout.timeout;
|
|
+ ktime_t next = KTIME_MAX;
|
|
+ bool resub = false;
|
|
+ int status;
|
|
+
|
|
+ trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
|
|
+
|
|
+ /*
|
|
+ * Mark reaper as "not pending". This is done before checking any
|
|
+ * packets to avoid lost-update type problems.
|
|
+ */
|
|
+ spin_lock(&ptl->rtx_timeout.lock);
|
|
+ ptl->rtx_timeout.expires = KTIME_MAX;
|
|
+ spin_unlock(&ptl->rtx_timeout.lock);
|
|
+
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+
|
|
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
|
|
+ ktime_t expires = ssh_packet_get_expiration(p, timeout);
|
|
+
|
|
+ /*
|
|
+ * Check if the timeout hasn't expired yet. Find out next
|
|
+ * expiration date to be handled after this run.
|
|
+ */
|
|
+ if (ktime_after(expires, now)) {
|
|
+ next = ktime_before(expires, next) ? expires : next;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ trace_ssam_packet_timeout(p);
|
|
+
|
|
+ status = __ssh_ptl_resubmit(p);
|
|
+
|
|
+ /*
|
|
+ * Re-submission fails if the packet is out of tries, has been
|
|
+ * locked, is already queued, or the layer is being shut down.
|
|
+ * No need to re-schedule tx-thread in those cases.
|
|
+ */
|
|
+ if (!status)
|
|
+ resub = true;
|
|
+
|
|
+ /* Go to next packet if this packet is not out of tries. */
|
|
+ if (status != -ECANCELED)
|
|
+ continue;
|
|
+
|
|
+ /* No more tries left: Cancel the packet. */
|
|
+
|
|
+ /*
|
|
+ * If someone else has locked the packet already, don't use it
|
|
+ * and let the other party complete it.
|
|
+ */
|
|
+ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * We have now marked the packet as locked. Thus it cannot be
|
|
+ * added to the pending list again after we've removed it here.
|
|
+ * We can therefore re-use the pending_node of this packet
|
|
+ * temporarily.
|
|
+ */
|
|
+
|
|
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
|
|
+
|
|
+ atomic_dec(&ptl->pending.count);
|
|
+ list_del(&p->pending_node);
|
|
+
|
|
+ list_add_tail(&p->pending_node, &claimed);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ /* Cancel and complete the packet. */
|
|
+ list_for_each_entry_safe(p, n, &claimed, pending_node) {
|
|
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
|
|
+ ssh_ptl_queue_remove(p);
|
|
+ __ssh_ptl_complete(p, -ETIMEDOUT);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Drop the reference we've obtained by removing it from
|
|
+ * the pending set.
|
|
+ */
|
|
+ list_del(&p->pending_node);
|
|
+ ssh_packet_put(p);
|
|
+ }
|
|
+
|
|
+ /* Ensure that reaper doesn't run again immediately. */
|
|
+ next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
|
|
+ if (next != KTIME_MAX)
|
|
+ ssh_ptl_timeout_reaper_mod(ptl, now, next);
|
|
+
|
|
+ if (resub)
|
|
+ ssh_ptl_tx_wakeup_packet(ptl);
|
|
+}
|
|
+
|
|
+static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ /*
|
|
+ * Check if SEQ has been seen recently (i.e. packet was
|
|
+ * re-transmitted and we should ignore it).
|
|
+ */
|
|
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
|
|
+ if (likely(ptl->rx.blocked.seqs[i] != seq))
|
|
+ continue;
|
|
+
|
|
+ ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /* Update list of blocked sequence IDs. */
|
|
+ ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq;
|
|
+ ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
|
|
+ % ARRAY_SIZE(ptl->rx.blocked.seqs);
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
|
|
+ const struct ssh_frame *frame,
|
|
+ const struct ssam_span *payload)
|
|
+{
|
|
+ if (ssh_ptl_rx_retransmit_check(ptl, frame->seq))
|
|
+ return;
|
|
+
|
|
+ ptl->ops.data_received(ptl, payload);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
|
|
+{
|
|
+ struct ssh_packet *packet;
|
|
+ struct ssam_span buf;
|
|
+ struct msgbuf msgb;
|
|
+ int status;
|
|
+
|
|
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
|
|
+ if (status) {
|
|
+ ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
|
|
+ &ssh_ptl_ctrl_packet_ops);
|
|
+
|
|
+ msgb_init(&msgb, buf.ptr, buf.len);
|
|
+ msgb_push_ack(&msgb, seq);
|
|
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
|
|
+
|
|
+ ssh_ptl_submit(ptl, packet);
|
|
+ ssh_packet_put(packet);
|
|
+}
|
|
+
|
|
+static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
|
|
+{
|
|
+ struct ssh_packet *packet;
|
|
+ struct ssam_span buf;
|
|
+ struct msgbuf msgb;
|
|
+ int status;
|
|
+
|
|
+ status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
|
|
+ if (status) {
|
|
+ ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
|
|
+ &ssh_ptl_ctrl_packet_ops);
|
|
+
|
|
+ msgb_init(&msgb, buf.ptr, buf.len);
|
|
+ msgb_push_nak(&msgb);
|
|
+ ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
|
|
+
|
|
+ ssh_ptl_submit(ptl, packet);
|
|
+ ssh_packet_put(packet);
|
|
+}
|
|
+
|
|
+static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
|
|
+{
|
|
+ struct ssh_frame *frame;
|
|
+ struct ssam_span payload;
|
|
+ struct ssam_span aligned;
|
|
+ bool syn_found;
|
|
+ int status;
|
|
+
|
|
+ /* Error injection: Modify data to simulate corrupt SYN bytes. */
|
|
+ ssh_ptl_rx_inject_invalid_syn(ptl, source);
|
|
+
|
|
+ /* Find SYN. */
|
|
+ syn_found = sshp_find_syn(source, &aligned);
|
|
+
|
|
+ if (unlikely(aligned.ptr != source->ptr)) {
|
|
+ /*
|
|
+ * We expect aligned.ptr == source->ptr. If this is not the
|
|
+ * case, then aligned.ptr > source->ptr and we've encountered
|
|
+ * some unexpected data where we'd expect the start of a new
|
|
+ * message (i.e. the SYN sequence).
|
|
+ *
|
|
+ * This can happen when a CRC check for the previous message
|
|
+ * failed and we start actively searching for the next one
|
|
+ * (via the call to sshp_find_syn() above), or the first bytes
|
|
+ * of a message got dropped or corrupted.
|
|
+ *
|
|
+ * In any case, we issue a warning, send a NAK to the EC to
|
|
+ * request re-transmission of any data we haven't acknowledged
|
|
+ * yet, and finally, skip everything up to the next SYN
|
|
+ * sequence.
|
|
+ */
|
|
+
|
|
+ ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
|
|
+
|
|
+ /*
|
|
+ * Notes:
|
|
+ * - This might send multiple NAKs in case the communication
|
|
+ * starts with an invalid SYN and is broken down into multiple
|
|
+ * pieces. This should generally be handled fine, we just
|
|
+ * might receive duplicate data in this case, which is
|
|
+ * detected when handling data frames.
|
|
+ * - This path will also be executed on invalid CRCs: When an
|
|
+ * invalid CRC is encountered, the code below will skip data
|
|
+ * until directly after the SYN. This causes the search for
|
|
+ * the next SYN, which is generally not placed directly after
|
|
+ * the last one.
|
|
+ *
|
|
+ * Open question: Should we send this in case of invalid
|
|
+ * payload CRCs if the frame-type is non-sequential (current
|
|
+ * implementation) or should we drop that frame without
|
|
+ * telling the EC?
|
|
+ */
|
|
+ ssh_ptl_send_nak(ptl);
|
|
+ }
|
|
+
|
|
+ if (unlikely(!syn_found))
|
|
+ return aligned.ptr - source->ptr;
|
|
+
|
|
+ /* Error injection: Modify data to simulate corruption. */
|
|
+ ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
|
|
+
|
|
+ /* Parse and validate frame. */
|
|
+ status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
|
|
+ SSH_PTL_RX_BUF_LEN);
|
|
+ if (status) /* Invalid frame: skip to next SYN. */
|
|
+ return aligned.ptr - source->ptr + sizeof(u16);
|
|
+ if (!frame) /* Not enough data. */
|
|
+ return aligned.ptr - source->ptr;
|
|
+
|
|
+ trace_ssam_rx_frame_received(frame);
|
|
+
|
|
+ switch (frame->type) {
|
|
+ case SSH_FRAME_TYPE_ACK:
|
|
+ ssh_ptl_acknowledge(ptl, frame->seq);
|
|
+ break;
|
|
+
|
|
+ case SSH_FRAME_TYPE_NAK:
|
|
+ ssh_ptl_resubmit_pending(ptl);
|
|
+ break;
|
|
+
|
|
+ case SSH_FRAME_TYPE_DATA_SEQ:
|
|
+ ssh_ptl_send_ack(ptl, frame->seq);
|
|
+ /* fallthrough */
|
|
+
|
|
+ case SSH_FRAME_TYPE_DATA_NSQ:
|
|
+ ssh_ptl_rx_dataframe(ptl, frame, &payload);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
|
|
+ frame->type);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
|
|
+}
|
|
+
|
|
+static int ssh_ptl_rx_threadfn(void *data)
|
|
+{
|
|
+ struct ssh_ptl *ptl = data;
|
|
+
|
|
+ while (true) {
|
|
+ struct ssam_span span;
|
|
+ size_t offs = 0;
|
|
+ size_t n;
|
|
+
|
|
+ wait_event_interruptible(ptl->rx.wq,
|
|
+ !kfifo_is_empty(&ptl->rx.fifo) ||
|
|
+ kthread_should_stop());
|
|
+ if (kthread_should_stop())
|
|
+ break;
|
|
+
|
|
+ /* Copy from fifo to evaluation buffer. */
|
|
+ n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
|
|
+
|
|
+ ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
|
|
+ print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
|
|
+ ptl->rx.buf.ptr + ptl->rx.buf.len - n,
|
|
+ n, false);
|
|
+
|
|
+ /* Parse until we need more bytes or buffer is empty. */
|
|
+ while (offs < ptl->rx.buf.len) {
|
|
+ sshp_buf_span_from(&ptl->rx.buf, offs, &span);
|
|
+ n = ssh_ptl_rx_eval(ptl, &span);
|
|
+ if (n == 0)
|
|
+ break; /* Need more bytes. */
|
|
+
|
|
+ offs += n;
|
|
+ }
|
|
+
|
|
+ /* Throw away the evaluated parts. */
|
|
+ sshp_buf_drop(&ptl->rx.buf, offs);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
|
|
+{
|
|
+ wake_up(&ptl->rx.wq);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_rx_start() - Start packet transport layer receiver thread.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssh_ptl_rx_start(struct ssh_ptl *ptl)
|
|
+{
|
|
+ if (ptl->rx.thread)
|
|
+ return 0;
|
|
+
|
|
+ ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
|
|
+ "ssam_serial_hub-rx");
|
|
+ if (IS_ERR(ptl->rx.thread))
|
|
+ return PTR_ERR(ptl->rx.thread);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
|
|
+{
|
|
+ int status = 0;
|
|
+
|
|
+ if (ptl->rx.thread) {
|
|
+ status = kthread_stop(ptl->rx.thread);
|
|
+ ptl->rx.thread = NULL;
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
|
|
+ * layer.
|
|
+ * @ptl: The packet transport layer.
|
|
+ * @buf: Pointer to the data to push to the layer.
|
|
+ * @n: Size of the data to push to the layer, in bytes.
|
|
+ *
|
|
+ * Pushes data from a lower-layer transport to the receiver fifo buffer of the
|
|
+ * packet layer and notifies the receiver thread. Calls to this function are
|
|
+ * ignored once the packet layer has been shut down.
|
|
+ *
|
|
+ * Return: Returns the number of bytes transferred (positive or zero) on
|
|
+ * success. Returns %-ESHUTDOWN if the packet layer has been shut down.
|
|
+ */
|
|
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
|
|
+{
|
|
+ int used;
|
|
+
|
|
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ used = kfifo_in(&ptl->rx.fifo, buf, n);
|
|
+ if (used)
|
|
+ ssh_ptl_rx_wakeup(ptl);
|
|
+
|
|
+ return used;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_shutdown() - Shut down the packet transport layer.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Shuts down the packet transport layer, removing and canceling all queued
|
|
+ * and pending packets. Packets canceled by this operation will be completed
|
|
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
|
|
+ * stopped.
|
|
+ *
|
|
+ * As a result of this function, the transport layer will be marked as shut
|
|
+ * down. Submission of packets after the transport layer has been shut down
|
|
+ * will fail with %-ESHUTDOWN.
|
|
+ */
|
|
+void ssh_ptl_shutdown(struct ssh_ptl *ptl)
|
|
+{
|
|
+ LIST_HEAD(complete_q);
|
|
+ LIST_HEAD(complete_p);
|
|
+ struct ssh_packet *p, *n;
|
|
+ int status;
|
|
+
|
|
+ /* Ensure that no new packets (including ACK/NAK) can be submitted. */
|
|
+ set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
|
|
+ /*
|
|
+ * Ensure that the layer gets marked as shut-down before actually
|
|
+ * stopping it. In combination with the check in ssh_ptl_queue_push(),
|
|
+ * this guarantees that no new packets can be added and all already
|
|
+ * queued packets are properly canceled. In combination with the check
|
|
+ * in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
|
|
+ * properly cut off.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ status = ssh_ptl_rx_stop(ptl);
|
|
+ if (status)
|
|
+ ptl_err(ptl, "ptl: failed to stop receiver thread\n");
|
|
+
|
|
+ status = ssh_ptl_tx_stop(ptl);
|
|
+ if (status)
|
|
+ ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
|
|
+
|
|
+ cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
|
|
+
|
|
+ /*
|
|
+ * At this point, all threads have been stopped. This means that the
|
|
+ * only references to packets from inside the system are in the queue
|
|
+ * and pending set.
|
|
+ *
|
|
+ * Note: We still need locks here because someone could still be
|
|
+ * canceling packets.
|
|
+ *
|
|
+ * Note 2: We can re-use queue_node (or pending_node) if we mark the
|
|
+ * packet as locked an then remove it from the queue (or pending set
|
|
+ * respectively). Marking the packet as locked avoids re-queuing
|
|
+ * (which should already be prevented by having stopped the treads...)
|
|
+ * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
|
|
+ * new list via other threads (e.g. cancellation).
|
|
+ *
|
|
+ * Note 3: There may be overlap between complete_p and complete_q.
|
|
+ * This is handled via test_and_set_bit() on the "completed" flag
|
|
+ * (also handles cancellation).
|
|
+ */
|
|
+
|
|
+ /* Mark queued packets as locked and move them to complete_q. */
|
|
+ spin_lock(&ptl->queue.lock);
|
|
+ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
|
|
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
|
|
+ /* Ensure that state does not get zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
|
|
+
|
|
+ list_del(&p->queue_node);
|
|
+ list_add_tail(&p->queue_node, &complete_q);
|
|
+ }
|
|
+ spin_unlock(&ptl->queue.lock);
|
|
+
|
|
+ /* Mark pending packets as locked and move them to complete_p. */
|
|
+ spin_lock(&ptl->pending.lock);
|
|
+ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
|
|
+ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
|
|
+ /* Ensure that state does not get zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
|
|
+
|
|
+ list_del(&p->pending_node);
|
|
+ list_add_tail(&p->pending_node, &complete_q);
|
|
+ }
|
|
+ atomic_set(&ptl->pending.count, 0);
|
|
+ spin_unlock(&ptl->pending.lock);
|
|
+
|
|
+ /* Complete and drop packets on complete_q. */
|
|
+ list_for_each_entry(p, &complete_q, queue_node) {
|
|
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
|
|
+ __ssh_ptl_complete(p, -ESHUTDOWN);
|
|
+
|
|
+ ssh_packet_put(p);
|
|
+ }
|
|
+
|
|
+ /* Complete and drop packets on complete_p. */
|
|
+ list_for_each_entry(p, &complete_p, pending_node) {
|
|
+ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
|
|
+ __ssh_ptl_complete(p, -ESHUTDOWN);
|
|
+
|
|
+ ssh_packet_put(p);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * At this point we have guaranteed that the system doesn't reference
|
|
+ * any packets any more.
|
|
+ */
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_init() - Initialize packet transport layer.
|
|
+ * @ptl: The packet transport layer to initialize.
|
|
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
|
|
+ * @ops: Packet layer operations.
|
|
+ *
|
|
+ * Initializes the given packet transport layer. Transmitter and receiver
|
|
+ * threads must be started separately via ssh_ptl_tx_start() and
|
|
+ * ssh_ptl_rx_start(), after the packet-layer has been initialized and the
|
|
+ * lower-level transport layer has been set up.
|
|
+ *
|
|
+ * Return: Returns zero on success and a nonzero error code on failure.
|
|
+ */
|
|
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
|
|
+ struct ssh_ptl_ops *ops)
|
|
+{
|
|
+ int i, status;
|
|
+
|
|
+ ptl->serdev = serdev;
|
|
+ ptl->state = 0;
|
|
+
|
|
+ spin_lock_init(&ptl->queue.lock);
|
|
+ INIT_LIST_HEAD(&ptl->queue.head);
|
|
+
|
|
+ spin_lock_init(&ptl->pending.lock);
|
|
+ INIT_LIST_HEAD(&ptl->pending.head);
|
|
+ atomic_set_release(&ptl->pending.count, 0);
|
|
+
|
|
+ ptl->tx.thread = NULL;
|
|
+ atomic_set(&ptl->tx.running, 0);
|
|
+ init_completion(&ptl->tx.thread_cplt_pkt);
|
|
+ init_completion(&ptl->tx.thread_cplt_tx);
|
|
+ init_waitqueue_head(&ptl->tx.packet_wq);
|
|
+
|
|
+ ptl->rx.thread = NULL;
|
|
+ init_waitqueue_head(&ptl->rx.wq);
|
|
+
|
|
+ spin_lock_init(&ptl->rtx_timeout.lock);
|
|
+ ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
|
|
+ ptl->rtx_timeout.expires = KTIME_MAX;
|
|
+ INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
|
|
+
|
|
+ ptl->ops = *ops;
|
|
+
|
|
+ /* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
|
|
+ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
|
|
+ ptl->rx.blocked.seqs[i] = U16_MAX;
|
|
+ ptl->rx.blocked.offset = 0;
|
|
+
|
|
+ status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
|
|
+ if (status)
|
|
+ kfifo_free(&ptl->rx.fifo);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_destroy() - Deinitialize packet transport layer.
|
|
+ * @ptl: The packet transport layer to deinitialize.
|
|
+ *
|
|
+ * Deinitializes the given packet transport layer and frees resources
|
|
+ * associated with it. If receiver and/or transmitter threads have been
|
|
+ * started, the layer must first be shut down via ssh_ptl_shutdown() before
|
|
+ * this function can be called.
|
|
+ */
|
|
+void ssh_ptl_destroy(struct ssh_ptl *ptl)
|
|
+{
|
|
+ kfifo_free(&ptl->rx.fifo);
|
|
+ sshp_buf_free(&ptl->rx.buf);
|
|
+}
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_packet_layer.h b/drivers/platform/x86/surface_aggregator/ssh_packet_layer.h
|
|
new file mode 100644
|
|
index 000000000000..2eb329f0b91a
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_packet_layer.h
|
|
@@ -0,0 +1,190 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * SSH packet transport layer.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
|
|
+#define _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H
|
|
+
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/kfifo.h>
|
|
+#include <linux/ktime.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/serdev.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/wait.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+#include "ssh_parser.h"
|
|
+
|
|
+/**
|
|
+ * enum ssh_ptl_state_flags - State-flags for &struct ssh_ptl.
|
|
+ *
|
|
+ * @SSH_PTL_SF_SHUTDOWN_BIT:
|
|
+ * Indicates that the packet transport layer has been shut down or is
|
|
+ * being shut down and should not accept any new packets/data.
|
|
+ */
|
|
+enum ssh_ptl_state_flags {
|
|
+ SSH_PTL_SF_SHUTDOWN_BIT,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_ptl_ops - Callback operations for packet transport layer.
|
|
+ * @data_received: Function called when a data-packet has been received. Both,
|
|
+ * the packet layer on which the packet has been received and
|
|
+ * the packet's payload data are provided to this function.
|
|
+ */
|
|
+struct ssh_ptl_ops {
|
|
+ void (*data_received)(struct ssh_ptl *p, const struct ssam_span *data);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_ptl - SSH packet transport layer.
|
|
+ * @serdev: Serial device providing the underlying data transport.
|
|
+ * @state: State(-flags) of the transport layer.
|
|
+ * @queue: Packet submission queue.
|
|
+ * @queue.lock: Lock for modifying the packet submission queue.
|
|
+ * @queue.head: List-head of the packet submission queue.
|
|
+ * @pending: Set/list of pending packets.
|
|
+ * @pending.lock: Lock for modifying the pending set.
|
|
+ * @pending.head: List-head of the pending set/list.
|
|
+ * @pending.count: Number of currently pending packets.
|
|
+ * @tx: Transmitter subsystem.
|
|
+ * @tx.running: Flag indicating (desired) transmitter thread state.
|
|
+ * @tx.thread: Transmitter thread.
|
|
+ * @tx.thread_cplt_tx: Completion for transmitter thread waiting on transfer.
|
|
+ * @tx.thread_cplt_pkt: Completion for transmitter thread waiting on packets.
|
|
+ * @tx.packet_wq: Waitqueue-head for packet transmit completion.
|
|
+ * @rx: Receiver subsystem.
|
|
+ * @rx.thread: Receiver thread.
|
|
+ * @rx.wq: Waitqueue-head for receiver thread.
|
|
+ * @rx.fifo: Buffer for receiving data/pushing data to receiver thread.
|
|
+ * @rx.buf: Buffer for evaluating data on receiver thread.
|
|
+ * @rx.blocked: List of recent/blocked sequence IDs to detect retransmission.
|
|
+ * @rx.blocked.seqs: Array of blocked sequence IDs.
|
|
+ * @rx.blocked.offset: Offset indicating where a new ID should be inserted.
|
|
+ * @rtx_timeout: Retransmission timeout subsystem.
|
|
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
|
|
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
|
|
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
|
|
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
|
|
+ * @ops: Packet layer operations.
|
|
+ */
|
|
+struct ssh_ptl {
|
|
+ struct serdev_device *serdev;
|
|
+ unsigned long state;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ } queue;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ atomic_t count;
|
|
+ } pending;
|
|
+
|
|
+ struct {
|
|
+ atomic_t running;
|
|
+ struct task_struct *thread;
|
|
+ struct completion thread_cplt_tx;
|
|
+ struct completion thread_cplt_pkt;
|
|
+ struct wait_queue_head packet_wq;
|
|
+ } tx;
|
|
+
|
|
+ struct {
|
|
+ struct task_struct *thread;
|
|
+ struct wait_queue_head wq;
|
|
+ struct kfifo fifo;
|
|
+ struct sshp_buf buf;
|
|
+
|
|
+ struct {
|
|
+ u16 seqs[8];
|
|
+ u16 offset;
|
|
+ } blocked;
|
|
+ } rx;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ ktime_t timeout;
|
|
+ ktime_t expires;
|
|
+ struct delayed_work reaper;
|
|
+ } rtx_timeout;
|
|
+
|
|
+ struct ssh_ptl_ops ops;
|
|
+};
|
|
+
|
|
+#define __ssam_prcond(func, p, fmt, ...) \
|
|
+ do { \
|
|
+ typeof(p) __p = (p); \
|
|
+ \
|
|
+ if (__p) \
|
|
+ func(__p, fmt, ##__VA_ARGS__); \
|
|
+ } while (0)
|
|
+
|
|
+#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
|
|
+#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
|
|
+#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
|
|
+#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__)
|
|
+#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__)
|
|
+
|
|
+#define to_ssh_ptl(ptr, member) \
|
|
+ container_of(ptr, struct ssh_ptl, member)
|
|
+
|
|
+int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
|
|
+ struct ssh_ptl_ops *ops);
|
|
+
|
|
+void ssh_ptl_destroy(struct ssh_ptl *ptl);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_get_device() - Get device associated with packet transport layer.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Return: Returns the device on which the given packet transport layer builds
|
|
+ * upon.
|
|
+ */
|
|
+static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl)
|
|
+{
|
|
+ return ptl->serdev ? &ptl->serdev->dev : NULL;
|
|
+}
|
|
+
|
|
+int ssh_ptl_tx_start(struct ssh_ptl *ptl);
|
|
+int ssh_ptl_tx_stop(struct ssh_ptl *ptl);
|
|
+int ssh_ptl_rx_start(struct ssh_ptl *ptl);
|
|
+int ssh_ptl_rx_stop(struct ssh_ptl *ptl);
|
|
+void ssh_ptl_shutdown(struct ssh_ptl *ptl);
|
|
+
|
|
+int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p);
|
|
+void ssh_ptl_cancel(struct ssh_packet *p);
|
|
+
|
|
+int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n);
|
|
+
|
|
+/**
|
|
+ * ssh_ptl_tx_wakeup_transfer() - Wake up packet transmitter thread for
|
|
+ * transfer.
|
|
+ * @ptl: The packet transport layer.
|
|
+ *
|
|
+ * Wakes up the packet transmitter thread, notifying it that the underlying
|
|
+ * transport has more space for data to be transmitted. If the packet
|
|
+ * transport layer has been shut down, calls to this function will be ignored.
|
|
+ */
|
|
+static inline void ssh_ptl_tx_wakeup_transfer(struct ssh_ptl *ptl)
|
|
+{
|
|
+ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
|
|
+ return;
|
|
+
|
|
+ complete(&ptl->tx.thread_cplt_tx);
|
|
+}
|
|
+
|
|
+void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
|
|
+ u8 priority, const struct ssh_packet_ops *ops);
|
|
+
|
|
+int ssh_ctrl_packet_cache_init(void);
|
|
+void ssh_ctrl_packet_cache_destroy(void);
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_SSH_PACKET_LAYER_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_parser.c b/drivers/platform/x86/surface_aggregator/ssh_parser.c
|
|
new file mode 100644
|
|
index 000000000000..b77912f8f13b
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_parser.c
|
|
@@ -0,0 +1,228 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * SSH message parser.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/compiler.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+#include "ssh_parser.h"
|
|
+
|
|
+/**
|
|
+ * sshp_validate_crc() - Validate a CRC in raw message data.
|
|
+ * @src: The span of data over which the CRC should be computed.
|
|
+ * @crc: The pointer to the expected u16 CRC value.
|
|
+ *
|
|
+ * Computes the CRC of the provided data span (@src), compares it to the CRC
|
|
+ * stored at the given address (@crc), and returns the result of this
|
|
+ * comparison, i.e. %true if equal. This function is intended to run on raw
|
|
+ * input/message data.
|
|
+ *
|
|
+ * Return: Returns %true if the computed CRC matches the stored CRC, %false
|
|
+ * otherwise.
|
|
+ */
|
|
+static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
|
|
+{
|
|
+ u16 actual = ssh_crc(src->ptr, src->len);
|
|
+ u16 expected = get_unaligned_le16(crc);
|
|
+
|
|
+ return actual == expected;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
|
|
+ * @src: The data span to check the start of.
|
|
+ */
|
|
+static bool sshp_starts_with_syn(const struct ssam_span *src)
|
|
+{
|
|
+ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_find_syn() - Find SSH SYN bytes in the given data span.
|
|
+ * @src: The data span to search in.
|
|
+ * @rem: The span (output) indicating the remaining data, starting with SSH
|
|
+ * SYN bytes, if found.
|
|
+ *
|
|
+ * Search for SSH SYN bytes in the given source span. If found, set the @rem
|
|
+ * span to the remaining data, starting with the first SYN bytes and capped by
|
|
+ * the source span length, and return %true. This function does not copy any
|
|
+ * data, but rather only sets pointers to the respective start addresses and
|
|
+ * length values.
|
|
+ *
|
|
+ * If no SSH SYN bytes could be found, set the @rem span to the zero-length
|
|
+ * span at the end of the source span and return %false.
|
|
+ *
|
|
+ * If partial SSH SYN bytes could be found at the end of the source span, set
|
|
+ * the @rem span to cover these partial SYN bytes, capped by the end of the
|
|
+ * source span, and return %false. This function should then be re-run once
|
|
+ * more data is available.
|
|
+ *
|
|
+ * Return: Returns %true if a complete SSH SYN sequence could be found,
|
|
+ * %false otherwise.
|
|
+ */
|
|
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < src->len - 1; i++) {
|
|
+ if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
|
|
+ rem->ptr = src->ptr + i;
|
|
+ rem->len = src->len - i;
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
|
|
+ rem->ptr = src->ptr + src->len - 1;
|
|
+ rem->len = 1;
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ rem->ptr = src->ptr + src->len;
|
|
+ rem->len = 0;
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_parse_frame() - Parse SSH frame.
|
|
+ * @dev: The device used for logging.
|
|
+ * @source: The source to parse from.
|
|
+ * @frame: The parsed frame (output).
|
|
+ * @payload: The parsed payload (output).
|
|
+ * @maxlen: The maximum supported message length.
|
|
+ *
|
|
+ * Parses and validates a SSH frame, including its payload, from the given
|
|
+ * source. Sets the provided @frame pointer to the start of the frame and
|
|
+ * writes the limits of the frame payload to the provided @payload span
|
|
+ * pointer.
|
|
+ *
|
|
+ * This function does not copy any data, but rather only validates the message
|
|
+ * data and sets pointers (and length values) to indicate the respective parts.
|
|
+ *
|
|
+ * If no complete SSH frame could be found, the frame pointer will be set to
|
|
+ * the %NULL pointer and the payload span will be set to the null span (start
|
|
+ * pointer %NULL, size zero).
|
|
+ *
|
|
+ * Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
|
|
+ * the start of the message is invalid, %-EBADMSG if any (frame-header or
|
|
+ * payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
|
|
+ * the maximum message length specified in the @maxlen parameter.
|
|
+ */
|
|
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
|
|
+ struct ssh_frame **frame, struct ssam_span *payload,
|
|
+ size_t maxlen)
|
|
+{
|
|
+ struct ssam_span sf;
|
|
+ struct ssam_span sp;
|
|
+
|
|
+ /* Initialize output. */
|
|
+ *frame = NULL;
|
|
+ payload->ptr = NULL;
|
|
+ payload->len = 0;
|
|
+
|
|
+ if (!sshp_starts_with_syn(source)) {
|
|
+ dev_warn(dev, "rx: parser: invalid start of frame\n");
|
|
+ return -ENOMSG;
|
|
+ }
|
|
+
|
|
+ /* Check for minimum packet length. */
|
|
+ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
|
|
+ dev_dbg(dev, "rx: parser: not enough data for frame\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Pin down frame. */
|
|
+ sf.ptr = source->ptr + sizeof(u16);
|
|
+ sf.len = sizeof(struct ssh_frame);
|
|
+
|
|
+ /* Validate frame CRC. */
|
|
+ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
|
|
+ dev_warn(dev, "rx: parser: invalid frame CRC\n");
|
|
+ return -EBADMSG;
|
|
+ }
|
|
+
|
|
+ /* Ensure packet does not exceed maximum length. */
|
|
+ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
|
|
+ if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) {
|
|
+ dev_warn(dev, "rx: parser: frame too large: %llu bytes\n",
|
|
+ SSH_MESSAGE_LENGTH(sp.len));
|
|
+ return -EMSGSIZE;
|
|
+ }
|
|
+
|
|
+ /* Pin down payload. */
|
|
+ sp.ptr = sf.ptr + sf.len + sizeof(u16);
|
|
+
|
|
+ /* Check for frame + payload length. */
|
|
+ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
|
|
+ dev_dbg(dev, "rx: parser: not enough data for payload\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Validate payload CRC. */
|
|
+ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
|
|
+ dev_warn(dev, "rx: parser: invalid payload CRC\n");
|
|
+ return -EBADMSG;
|
|
+ }
|
|
+
|
|
+ *frame = (struct ssh_frame *)sf.ptr;
|
|
+ *payload = sp;
|
|
+
|
|
+ dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n",
|
|
+ (*frame)->type, (*frame)->len);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_parse_command() - Parse SSH command frame payload.
|
|
+ * @dev: The device used for logging.
|
|
+ * @source: The source to parse from.
|
|
+ * @command: The parsed command (output).
|
|
+ * @command_data: The parsed command data/payload (output).
|
|
+ *
|
|
+ * Parses and validates a SSH command frame payload. Sets the @command pointer
|
|
+ * to the command header and the @command_data span to the command data (i.e.
|
|
+ * payload of the command). This will result in a zero-length span if the
|
|
+ * command does not have any associated data/payload. This function does not
|
|
+ * check the frame-payload-type field, which should be checked by the caller
|
|
+ * before calling this function.
|
|
+ *
|
|
+ * The @source parameter should be the complete frame payload, e.g. returned
|
|
+ * by the sshp_parse_frame() function.
|
|
+ *
|
|
+ * This function does not copy any data, but rather only validates the frame
|
|
+ * payload data and sets pointers (and length values) to indicate the
|
|
+ * respective parts.
|
|
+ *
|
|
+ * Return: Returns zero on success or %-ENOMSG if @source does not represent a
|
|
+ * valid command-type frame payload, i.e. is too short.
|
|
+ */
|
|
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
|
|
+ struct ssh_command **command,
|
|
+ struct ssam_span *command_data)
|
|
+{
|
|
+ /* Check for minimum length. */
|
|
+ if (unlikely(source->len < sizeof(struct ssh_command))) {
|
|
+ *command = NULL;
|
|
+ command_data->ptr = NULL;
|
|
+ command_data->len = 0;
|
|
+
|
|
+ dev_err(dev, "rx: parser: command payload is too short\n");
|
|
+ return -ENOMSG;
|
|
+ }
|
|
+
|
|
+ *command = (struct ssh_command *)source->ptr;
|
|
+ command_data->ptr = source->ptr + sizeof(struct ssh_command);
|
|
+ command_data->len = source->len - sizeof(struct ssh_command);
|
|
+
|
|
+ dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
|
|
+ (*command)->tc, (*command)->cid);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_parser.h b/drivers/platform/x86/surface_aggregator/ssh_parser.h
|
|
new file mode 100644
|
|
index 000000000000..3bd6e180fd16
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_parser.h
|
|
@@ -0,0 +1,154 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * SSH message parser.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_SSH_PARSER_H
|
|
+#define _SURFACE_AGGREGATOR_SSH_PARSER_H
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/kfifo.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+/**
|
|
+ * struct sshp_buf - Parser buffer for SSH messages.
|
|
+ * @ptr: Pointer to the beginning of the buffer.
|
|
+ * @len: Number of bytes used in the buffer.
|
|
+ * @cap: Maximum capacity of the buffer.
|
|
+ */
|
|
+struct sshp_buf {
|
|
+ u8 *ptr;
|
|
+ size_t len;
|
|
+ size_t cap;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * sshp_buf_init() - Initialize a SSH parser buffer.
|
|
+ * @buf: The buffer to initialize.
|
|
+ * @ptr: The memory backing the buffer.
|
|
+ * @cap: The length of the memory backing the buffer, i.e. its capacity.
|
|
+ *
|
|
+ * Initializes the buffer with the given memory as backing and set its used
|
|
+ * length to zero.
|
|
+ */
|
|
+static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap)
|
|
+{
|
|
+ buf->ptr = ptr;
|
|
+ buf->len = 0;
|
|
+ buf->cap = cap;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_buf_alloc() - Allocate and initialize a SSH parser buffer.
|
|
+ * @buf: The buffer to initialize/allocate to.
|
|
+ * @cap: The desired capacity of the buffer.
|
|
+ * @flags: The flags used for allocating the memory.
|
|
+ *
|
|
+ * Allocates @cap bytes and initializes the provided buffer struct with the
|
|
+ * allocated memory.
|
|
+ *
|
|
+ * Return: Returns zero on success and %-ENOMEM if allocation failed.
|
|
+ */
|
|
+static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags)
|
|
+{
|
|
+ u8 *ptr;
|
|
+
|
|
+ ptr = kzalloc(cap, flags);
|
|
+ if (!ptr)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ sshp_buf_init(buf, ptr, cap);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_buf_free() - Free a SSH parser buffer.
|
|
+ * @buf: The buffer to free.
|
|
+ *
|
|
+ * Frees a SSH parser buffer by freeing the memory backing it and then
|
|
+ * resetting its pointer to %NULL and length and capacity to zero. Intended to
|
|
+ * free a buffer previously allocated with sshp_buf_alloc().
|
|
+ */
|
|
+static inline void sshp_buf_free(struct sshp_buf *buf)
|
|
+{
|
|
+ kfree(buf->ptr);
|
|
+ buf->ptr = NULL;
|
|
+ buf->len = 0;
|
|
+ buf->cap = 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_buf_drop() - Drop data from the beginning of the buffer.
|
|
+ * @buf: The buffer to drop data from.
|
|
+ * @n: The number of bytes to drop.
|
|
+ *
|
|
+ * Drops the first @n bytes from the buffer. Re-aligns any remaining data to
|
|
+ * the beginning of the buffer.
|
|
+ */
|
|
+static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n)
|
|
+{
|
|
+ memmove(buf->ptr, buf->ptr + n, buf->len - n);
|
|
+ buf->len -= n;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_buf_read_from_fifo() - Transfer data from a fifo to the buffer.
|
|
+ * @buf: The buffer to write the data into.
|
|
+ * @fifo: The fifo to read the data from.
|
|
+ *
|
|
+ * Transfers the data contained in the fifo to the buffer, removing it from
|
|
+ * the fifo. This function will try to transfer as much data as possible,
|
|
+ * limited either by the remaining space in the buffer or by the number of
|
|
+ * bytes available in the fifo.
|
|
+ *
|
|
+ * Return: Returns the number of bytes transferred.
|
|
+ */
|
|
+static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf,
|
|
+ struct kfifo *fifo)
|
|
+{
|
|
+ size_t n;
|
|
+
|
|
+ n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len);
|
|
+ buf->len += n;
|
|
+
|
|
+ return n;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * sshp_buf_span_from() - Initialize a span from the given buffer and offset.
|
|
+ * @buf: The buffer to create the span from.
|
|
+ * @offset: The offset in the buffer at which the span should start.
|
|
+ * @span: The span to initialize (output).
|
|
+ *
|
|
+ * Initializes the provided span to point to the memory at the given offset in
|
|
+ * the buffer, with the length of the span being capped by the number of bytes
|
|
+ * used in the buffer after the offset (i.e. bytes remaining after the
|
|
+ * offset).
|
|
+ *
|
|
+ * Warning: This function does not validate that @offset is less than or equal
|
|
+ * to the number of bytes used in the buffer or the buffer capacity. This must
|
|
+ * be guaranteed by the caller.
|
|
+ */
|
|
+static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset,
|
|
+ struct ssam_span *span)
|
|
+{
|
|
+ span->ptr = buf->ptr + offset;
|
|
+ span->len = buf->len - offset;
|
|
+}
|
|
+
|
|
+bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem);
|
|
+
|
|
+int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
|
|
+ struct ssh_frame **frame, struct ssam_span *payload,
|
|
+ size_t maxlen);
|
|
+
|
|
+int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
|
|
+ struct ssh_command **command,
|
|
+ struct ssam_span *command_data);
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_SSH_PARSER_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_request_layer.c b/drivers/platform/x86/surface_aggregator/ssh_request_layer.c
|
|
new file mode 100644
|
|
index 000000000000..bfe1aaf38065
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_request_layer.c
|
|
@@ -0,0 +1,1263 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * SSH request transport layer.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/completion.h>
|
|
+#include <linux/error-injection.h>
|
|
+#include <linux/ktime.h>
|
|
+#include <linux/limits.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#include "ssh_packet_layer.h"
|
|
+#include "ssh_request_layer.h"
|
|
+
|
|
+#include "trace.h"
|
|
+
|
|
+/*
|
|
+ * SSH_RTL_REQUEST_TIMEOUT - Request timeout.
|
|
+ *
|
|
+ * Timeout as ktime_t delta for request responses. If we have not received a
|
|
+ * response in this time-frame after finishing the underlying packet
|
|
+ * transmission, the request will be completed with %-ETIMEDOUT as status
|
|
+ * code.
|
|
+ */
|
|
+#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
|
|
+
|
|
+/*
|
|
+ * SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
|
|
+ *
|
|
+ * Time-resolution for timeouts. Should be larger than one jiffy to avoid
|
|
+ * direct re-scheduling of reaper work_struct.
|
|
+ */
|
|
+#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
|
|
+
|
|
+/*
|
|
+ * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
|
|
+ *
|
|
+ * Maximum number of requests concurrently waiting to be completed (i.e.
|
|
+ * waiting for the corresponding packet transmission to finish if they don't
|
|
+ * have a response or waiting for a response if they have one).
|
|
+ */
|
|
+#define SSH_RTL_MAX_PENDING 3
|
|
+
|
|
+/*
|
|
+ * SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
|
|
+ * Used to prevent livelocking of the workqueue. Value chosen via educated
|
|
+ * guess, may be adjusted.
|
|
+ */
|
|
+#define SSH_RTL_TX_BATCH 10
|
|
+
|
|
+#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_should_drop_response() - Error injection hook to drop request
|
|
+ * responses.
|
|
+ *
|
|
+ * Useful to cause request transmission timeouts in the driver by dropping the
|
|
+ * response to a request.
|
|
+ */
|
|
+static noinline bool ssh_rtl_should_drop_response(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
|
|
+
|
|
+#else
|
|
+
|
|
+static inline bool ssh_rtl_should_drop_response(void)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+static u16 ssh_request_get_rqid(struct ssh_request *rqst)
|
|
+{
|
|
+ return get_unaligned_le16(rqst->packet.data.ptr
|
|
+ + SSH_MSGOFFSET_COMMAND(rqid));
|
|
+}
|
|
+
|
|
+static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
|
|
+{
|
|
+ if (!rqst->packet.data.ptr)
|
|
+ return U32_MAX;
|
|
+
|
|
+ return ssh_request_get_rqid(rqst);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_queue_remove(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+
|
|
+ if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ list_del(&rqst->node);
|
|
+
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ ssh_request_put(rqst);
|
|
+}
|
|
+
|
|
+static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
|
|
+{
|
|
+ bool empty;
|
|
+
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+ empty = list_empty(&rtl->queue.head);
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+
|
|
+ return empty;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_pending_remove(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ spin_lock(&rtl->pending.lock);
|
|
+
|
|
+ if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ atomic_dec(&rtl->pending.count);
|
|
+ list_del(&rqst->node);
|
|
+
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+
|
|
+ ssh_request_put(rqst);
|
|
+}
|
|
+
|
|
+static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ spin_lock(&rtl->pending.lock);
|
|
+
|
|
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+ return -EALREADY;
|
|
+ }
|
|
+
|
|
+ atomic_inc(&rtl->pending.count);
|
|
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
|
|
+
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ trace_ssam_request_complete(rqst, status);
|
|
+
|
|
+ /* rtl/ptl may not be set if we're canceling before submitting. */
|
|
+ rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
|
|
+ ssh_request_get_rqid_safe(rqst), status);
|
|
+
|
|
+ rqst->ops->complete(rqst, NULL, NULL, status);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
|
|
+ const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ trace_ssam_request_complete(rqst, 0);
|
|
+
|
|
+ rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
|
|
+ ssh_request_get_rqid(rqst));
|
|
+
|
|
+ rqst->ops->complete(rqst, cmd, data, 0);
|
|
+}
|
|
+
|
|
+static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+
|
|
+ if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
|
|
+ return !atomic_read(&rtl->pending.count);
|
|
+
|
|
+ return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
|
|
+}
|
|
+
|
|
+static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
|
|
+{
|
|
+ struct ssh_request *rqst = ERR_PTR(-ENOENT);
|
|
+ struct ssh_request *p, *n;
|
|
+
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+
|
|
+ /* Find first non-locked request and remove it. */
|
|
+ list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
|
|
+ if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
|
|
+ continue;
|
|
+
|
|
+ if (!ssh_rtl_tx_can_process(p)) {
|
|
+ rqst = ERR_PTR(-EBUSY);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Remove from queue and mark as transmitting. */
|
|
+ set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
|
|
+ /* Ensure state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
|
|
+
|
|
+ list_del(&p->node);
|
|
+
|
|
+ rqst = p;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return rqst;
|
|
+}
|
|
+
|
|
+static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
|
|
+{
|
|
+ struct ssh_request *rqst;
|
|
+ int status;
|
|
+
|
|
+ /* Get and prepare next request for transmit. */
|
|
+ rqst = ssh_rtl_tx_next(rtl);
|
|
+ if (IS_ERR(rqst))
|
|
+ return PTR_ERR(rqst);
|
|
+
|
|
+ /* Add it to/mark it as pending. */
|
|
+ status = ssh_rtl_tx_pending_push(rqst);
|
|
+ if (status) {
|
|
+ ssh_request_put(rqst);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ /* Submit packet. */
|
|
+ status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
|
|
+ if (status == -ESHUTDOWN) {
|
|
+ /*
|
|
+ * Packet has been refused due to the packet layer shutting
|
|
+ * down. Complete it here.
|
|
+ */
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
|
|
+ /*
|
|
+ * Note: A barrier is not required here, as there are only two
|
|
+ * references in the system at this point: The one that we have,
|
|
+ * and the other one that belongs to the pending set. Due to the
|
|
+ * request being marked as "transmitting", our process is the
|
|
+ * only one allowed to remove the pending node and change the
|
|
+ * state. Normally, the task would fall to the packet callback,
|
|
+ * but as this is a path where submission failed, this callback
|
|
+ * will never be executed.
|
|
+ */
|
|
+
|
|
+ ssh_rtl_pending_remove(rqst);
|
|
+ ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
|
|
+
|
|
+ ssh_request_put(rqst);
|
|
+ return -ESHUTDOWN;
|
|
+
|
|
+ } else if (status) {
|
|
+ /*
|
|
+ * If submitting the packet failed and the packet layer isn't
|
|
+ * shutting down, the packet has either been submitted/queued
|
|
+ * before (-EALREADY, which cannot happen as we have
|
|
+ * guaranteed that requests cannot be re-submitted), or the
|
|
+ * packet was marked as locked (-EINVAL). To mark the packet
|
|
+ * locked at this stage, the request, and thus the packets
|
|
+ * itself, had to have been canceled. Simply drop the
|
|
+ * reference. Cancellation itself will remove it from the set
|
|
+ * of pending requests.
|
|
+ */
|
|
+
|
|
+ WARN_ON(status != -EINVAL);
|
|
+
|
|
+ ssh_request_put(rqst);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ ssh_request_put(rqst);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
|
|
+{
|
|
+ if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
|
|
+ return false;
|
|
+
|
|
+ if (ssh_rtl_queue_empty(rtl))
|
|
+ return false;
|
|
+
|
|
+ return schedule_work(&rtl->tx.work);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_tx_work_fn(struct work_struct *work)
|
|
+{
|
|
+ struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
|
|
+ unsigned int iterations = SSH_RTL_TX_BATCH;
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * Try to be nice and not block/live-lock the workqueue: Run a maximum
|
|
+ * of 10 tries, then re-submit if necessary. This should not be
|
|
+ * necessary for normal execution, but guarantee it anyway.
|
|
+ */
|
|
+ do {
|
|
+ status = ssh_rtl_tx_try_process_one(rtl);
|
|
+ if (status == -ENOENT || status == -EBUSY)
|
|
+ return; /* No more requests to process. */
|
|
+
|
|
+ if (status == -ESHUTDOWN) {
|
|
+ /*
|
|
+ * Packet system shutting down. No new packets can be
|
|
+ * transmitted. Return silently, the party initiating
|
|
+ * the shutdown should handle the rest.
|
|
+ */
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ WARN_ON(status != 0 && status != -EAGAIN);
|
|
+ } while (--iterations);
|
|
+
|
|
+ /* Out of tries, reschedule. */
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_submit() - Submit a request to the transport layer.
|
|
+ * @rtl: The request transport layer.
|
|
+ * @rqst: The request to submit.
|
|
+ *
|
|
+ * Submits a request to the transport layer. A single request may not be
|
|
+ * submitted multiple times without reinitializing it.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-EINVAL if the request type is invalid or
|
|
+ * the request has been canceled prior to submission, %-EALREADY if the
|
|
+ * request has already been submitted, or %-ESHUTDOWN in case the request
|
|
+ * transport layer has been shut down.
|
|
+ */
|
|
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
|
|
+{
|
|
+ trace_ssam_request_submit(rqst);
|
|
+
|
|
+ /*
|
|
+ * Ensure that requests expecting a response are sequenced. If this
|
|
+ * invariant ever changes, see the comment in ssh_rtl_complete() on what
|
|
+ * is required to be changed in the code.
|
|
+ */
|
|
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
|
|
+ if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
|
|
+ return -EINVAL;
|
|
+
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+
|
|
+ /*
|
|
+ * Try to set ptl and check if this request has already been submitted.
|
|
+ *
|
|
+ * Must be inside lock as we might run into a lost update problem
|
|
+ * otherwise: If this were outside of the lock, cancellation in
|
|
+ * ssh_rtl_cancel_nonpending() may run after we've set the ptl
|
|
+ * reference but before we enter the lock. In that case, we'd detect
|
|
+ * that the request is being added to the queue and would try to remove
|
|
+ * it from that, but removal might fail because it hasn't actually been
|
|
+ * added yet. By putting this cmpxchg in the critical section, we
|
|
+ * ensure that the queuing detection only triggers when we are already
|
|
+ * in the critical section and the remove process will wait until the
|
|
+ * push operation has been completed (via lock) due to that. Only then,
|
|
+ * we can safely try to remove it.
|
|
+ */
|
|
+ if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return -EALREADY;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Ensure that we set ptl reference before we continue modifying state.
|
|
+ * This is required for non-pending cancellation. This barrier is paired
|
|
+ * with the one in ssh_rtl_cancel_nonpending().
|
|
+ *
|
|
+ * By setting the ptl reference before we test for "locked", we can
|
|
+ * check if the "locked" test may have already run. See comments in
|
|
+ * ssh_rtl_cancel_nonpending() for more detail.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return -ESHUTDOWN;
|
|
+ }
|
|
+
|
|
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
|
|
+ list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
|
|
+
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
|
|
+ ktime_t expires)
|
|
+{
|
|
+ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
|
|
+ ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
|
|
+
|
|
+ spin_lock(&rtl->rtx_timeout.lock);
|
|
+
|
|
+ /* Re-adjust / schedule reaper only if it is above resolution delta. */
|
|
+ if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
|
|
+ rtl->rtx_timeout.expires = expires;
|
|
+ mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
|
|
+ }
|
|
+
|
|
+ spin_unlock(&rtl->rtx_timeout.lock);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_timeout_start(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_rtl *rtl = ssh_request_rtl(rqst);
|
|
+ ktime_t timestamp = ktime_get_coarse_boottime();
|
|
+ ktime_t timeout = rtl->rtx_timeout.timeout;
|
|
+
|
|
+ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * Note: The timestamp gets set only once. This happens on the packet
|
|
+ * callback. All other access to it is read-only.
|
|
+ */
|
|
+ WRITE_ONCE(rqst->timestamp, timestamp);
|
|
+ /*
|
|
+ * Ensure timestamp is set before starting the reaper. Paired with
|
|
+ * implicit barrier following check on ssh_request_get_expiration() in
|
|
+ * ssh_rtl_timeout_reap().
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_complete(struct ssh_rtl *rtl,
|
|
+ const struct ssh_command *command,
|
|
+ const struct ssam_span *command_data)
|
|
+{
|
|
+ struct ssh_request *r = NULL;
|
|
+ struct ssh_request *p, *n;
|
|
+ u16 rqid = get_unaligned_le16(&command->rqid);
|
|
+
|
|
+ trace_ssam_rx_response_received(command, command_data->len);
|
|
+
|
|
+ /*
|
|
+ * Get request from pending based on request ID and mark it as response
|
|
+ * received and locked.
|
|
+ */
|
|
+ spin_lock(&rtl->pending.lock);
|
|
+ list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
|
|
+ /* We generally expect requests to be processed in order. */
|
|
+ if (unlikely(ssh_request_get_rqid(p) != rqid))
|
|
+ continue;
|
|
+
|
|
+ /* Simulate response timeout. */
|
|
+ if (ssh_rtl_should_drop_response()) {
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+
|
|
+ trace_ssam_ei_rx_drop_response(p);
|
|
+ rtl_info(rtl, "request error injection: dropping response for request %p\n",
|
|
+ &p->packet);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Mark as "response received" and "locked" as we're going to
|
|
+ * complete it.
|
|
+ */
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
|
|
+ set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
|
|
+ /* Ensure state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
|
|
+
|
|
+ atomic_dec(&rtl->pending.count);
|
|
+ list_del(&p->node);
|
|
+
|
|
+ r = p;
|
|
+ break;
|
|
+ }
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+
|
|
+ if (!r) {
|
|
+ rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
|
|
+ rqid);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* If the request hasn't been completed yet, we will do this now. */
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
|
|
+ ssh_request_put(r);
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Make sure the request has been transmitted. In case of a sequenced
|
|
+ * request, we are guaranteed that the completion callback will run on
|
|
+ * the receiver thread directly when the ACK for the packet has been
|
|
+ * received. Similarly, this function is guaranteed to run on the
|
|
+ * receiver thread. Thus we are guaranteed that if the packet has been
|
|
+ * successfully transmitted and received an ACK, the transmitted flag
|
|
+ * has been set and is visible here.
|
|
+ *
|
|
+ * We are currently not handling unsequenced packets here, as those
|
|
+ * should never expect a response as ensured in ssh_rtl_submit. If this
|
|
+ * ever changes, one would have to test for
|
|
+ *
|
|
+ * (r->state & (transmitting | transmitted))
|
|
+ *
|
|
+ * on unsequenced packets to determine if they could have been
|
|
+ * transmitted. There are no synchronization guarantees as in the
|
|
+ * sequenced case, since, in this case, the callback function will not
|
|
+ * run on the same thread. Thus an exact determination is impossible.
|
|
+ */
|
|
+ if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
|
|
+ rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
|
|
+ rqid);
|
|
+
|
|
+ /*
|
|
+ * NB: Timeout has already been canceled, request already been
|
|
+ * removed from pending and marked as locked and completed. As
|
|
+ * we receive a "false" response, the packet might still be
|
|
+ * queued though.
|
|
+ */
|
|
+ ssh_rtl_queue_remove(r);
|
|
+
|
|
+ ssh_rtl_complete_with_status(r, -EREMOTEIO);
|
|
+ ssh_request_put(r);
|
|
+
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * NB: Timeout has already been canceled, request already been
|
|
+ * removed from pending and marked as locked and completed. The request
|
|
+ * can also not be queued any more, as it has been marked as
|
|
+ * transmitting and later transmitted. Thus no need to remove it from
|
|
+ * anywhere.
|
|
+ */
|
|
+
|
|
+ ssh_rtl_complete_with_rsp(r, command, command_data);
|
|
+ ssh_request_put(r);
|
|
+
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+}
|
|
+
|
|
+static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
|
|
+{
|
|
+ struct ssh_rtl *rtl;
|
|
+ unsigned long flags, fixed;
|
|
+ bool remove;
|
|
+
|
|
+ /*
|
|
+ * Handle unsubmitted request: Try to mark the packet as locked,
|
|
+ * expecting the state to be zero (i.e. unsubmitted). Note that, if
|
|
+ * setting the state worked, we might still be adding the packet to the
|
|
+ * queue in a currently executing submit call. In that case, however,
|
|
+ * ptl reference must have been set previously, as locked is checked
|
|
+ * after setting ptl. Furthermore, when the ptl reference is set, the
|
|
+ * submission process is guaranteed to have entered the critical
|
|
+ * section. Thus only if we successfully locked this request and ptl is
|
|
+ * NULL, we have successfully removed the request, i.e. we are
|
|
+ * guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
|
|
+ * packet will never be added. Otherwise, we need to try and grab it
|
|
+ * from the queue, where we are now guaranteed that the packet is or has
|
|
+ * been due to the critical section.
|
|
+ *
|
|
+ * Note that if the cmpxchg() fails, we are guaranteed that ptl has
|
|
+ * been set and is non-NULL, as states can only be nonzero after this
|
|
+ * has been set. Also note that we need to fetch the static (type)
|
|
+ * flags to ensure that they don't cause the cmpxchg() to fail.
|
|
+ */
|
|
+ fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
|
|
+ flags = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT);
|
|
+
|
|
+ /*
|
|
+ * Force correct ordering with regards to state and ptl reference access
|
|
+ * to safe-guard cancellation to concurrent submission against a
|
|
+ * lost-update problem. First try to exchange state, then also check
|
|
+ * ptl if that worked. This barrier is paired with the
|
|
+ * one in ssh_rtl_submit().
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return true;
|
|
+
|
|
+ ssh_rtl_complete_with_status(r, -ECANCELED);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ rtl = ssh_request_rtl(r);
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+
|
|
+ /*
|
|
+ * Note: 1) Requests cannot be re-submitted. 2) If a request is
|
|
+ * queued, it cannot be "transmitting"/"pending" yet. Thus, if we
|
|
+ * successfully remove the request here, we have removed all its
|
|
+ * occurrences in the system.
|
|
+ */
|
|
+
|
|
+ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
|
|
+ if (!remove) {
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
|
|
+ list_del(&r->node);
|
|
+
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+
|
|
+ ssh_request_put(r); /* Drop reference obtained from queue. */
|
|
+
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return true;
|
|
+
|
|
+ ssh_rtl_complete_with_status(r, -ECANCELED);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool ssh_rtl_cancel_pending(struct ssh_request *r)
|
|
+{
|
|
+ /* If the packet is already locked, it's going to be removed shortly. */
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
|
|
+ return true;
|
|
+
|
|
+ /*
|
|
+ * Now that we have locked the packet, we have guaranteed that it can't
|
|
+ * be added to the system any more. If ptl is NULL, the locked
|
|
+ * check in ssh_rtl_submit() has not been run and any submission,
|
|
+ * currently in progress or called later, won't add the packet. Thus we
|
|
+ * can directly complete it.
|
|
+ *
|
|
+ * The implicit memory barrier of test_and_set_bit() should be enough
|
|
+ * to ensure that the correct order (first lock, then check ptl) is
|
|
+ * ensured. This is paired with the barrier in ssh_rtl_submit().
|
|
+ */
|
|
+ if (!READ_ONCE(r->packet.ptl)) {
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return true;
|
|
+
|
|
+ ssh_rtl_complete_with_status(r, -ECANCELED);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Try to cancel the packet. If the packet has not been completed yet,
|
|
+ * this will subsequently (and synchronously) call the completion
|
|
+ * callback of the packet, which will complete the request.
|
|
+ */
|
|
+ ssh_ptl_cancel(&r->packet);
|
|
+
|
|
+ /*
|
|
+ * If the packet has been completed with success, i.e. has not been
|
|
+ * canceled by the above call, the request may not have been completed
|
|
+ * yet (may be waiting for a response). Check if we need to do this
|
|
+ * here.
|
|
+ */
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return true;
|
|
+
|
|
+ ssh_rtl_queue_remove(r);
|
|
+ ssh_rtl_pending_remove(r);
|
|
+ ssh_rtl_complete_with_status(r, -ECANCELED);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_cancel() - Cancel request.
|
|
+ * @rqst: The request to cancel.
|
|
+ * @pending: Whether to also cancel pending requests.
|
|
+ *
|
|
+ * Cancels the given request. If @pending is %false, this will not cancel
|
|
+ * pending requests, i.e. requests that have already been submitted to the
|
|
+ * packet layer but not been completed yet. If @pending is %true, this will
|
|
+ * cancel the given request regardless of the state it is in.
|
|
+ *
|
|
+ * If the request has been canceled by calling this function, both completion
|
|
+ * and release callbacks of the request will be executed in a reasonable
|
|
+ * time-frame. This may happen during execution of this function, however,
|
|
+ * there is no guarantee for this. For example, a request currently
|
|
+ * transmitting will be canceled/completed only after transmission has
|
|
+ * completed, and the respective callbacks will be executed on the transmitter
|
|
+ * thread, which may happen during, but also some time after execution of the
|
|
+ * cancel function.
|
|
+ *
|
|
+ * Return: Returns %true if the given request has been canceled or completed,
|
|
+ * either by this function or prior to calling this function, %false
|
|
+ * otherwise. If @pending is %true, this function will always return %true.
|
|
+ */
|
|
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
|
|
+{
|
|
+ struct ssh_rtl *rtl;
|
|
+ bool canceled;
|
|
+
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
|
|
+ return true;
|
|
+
|
|
+ trace_ssam_request_cancel(rqst);
|
|
+
|
|
+ if (pending)
|
|
+ canceled = ssh_rtl_cancel_pending(rqst);
|
|
+ else
|
|
+ canceled = ssh_rtl_cancel_nonpending(rqst);
|
|
+
|
|
+ /* Note: rtl may be NULL if request has not been submitted yet. */
|
|
+ rtl = ssh_request_rtl(rqst);
|
|
+ if (canceled && rtl)
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+
|
|
+ return canceled;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
|
|
+{
|
|
+ struct ssh_request *r = to_ssh_request(p);
|
|
+
|
|
+ if (unlikely(status)) {
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
|
|
+
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return;
|
|
+
|
|
+ /*
|
|
+ * The packet may get canceled even though it has not been
|
|
+ * submitted yet. The request may still be queued. Check the
|
|
+ * queue and remove it if necessary. As the timeout would have
|
|
+ * been started in this function on success, there's no need
|
|
+ * to cancel it here.
|
|
+ */
|
|
+ ssh_rtl_queue_remove(r);
|
|
+ ssh_rtl_pending_remove(r);
|
|
+ ssh_rtl_complete_with_status(r, status);
|
|
+
|
|
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Update state: Mark as transmitted and clear transmitting. */
|
|
+ set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
|
|
+ /* Ensure state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
|
|
+
|
|
+ /* If we expect a response, we just need to start the timeout. */
|
|
+ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
|
|
+ /*
|
|
+ * Note: This is the only place where the timestamp gets set,
|
|
+ * all other access to it is read-only.
|
|
+ */
|
|
+ ssh_rtl_timeout_start(r);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we don't expect a response, lock, remove, and complete the
|
|
+ * request. Note that, at this point, the request is guaranteed to have
|
|
+ * left the queue and no timeout has been started. Thus we only need to
|
|
+ * remove it from pending. If the request has already been completed (it
|
|
+ * may have been canceled) return.
|
|
+ */
|
|
+
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ return;
|
|
+
|
|
+ ssh_rtl_pending_remove(r);
|
|
+ ssh_rtl_complete_with_status(r, 0);
|
|
+
|
|
+ ssh_rtl_tx_schedule(ssh_request_rtl(r));
|
|
+}
|
|
+
|
|
+static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
|
|
+{
|
|
+ ktime_t timestamp = READ_ONCE(r->timestamp);
|
|
+
|
|
+ if (timestamp != KTIME_MAX)
|
|
+ return ktime_add(timestamp, timeout);
|
|
+ else
|
|
+ return KTIME_MAX;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_timeout_reap(struct work_struct *work)
|
|
+{
|
|
+ struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
|
|
+ struct ssh_request *r, *n;
|
|
+ LIST_HEAD(claimed);
|
|
+ ktime_t now = ktime_get_coarse_boottime();
|
|
+ ktime_t timeout = rtl->rtx_timeout.timeout;
|
|
+ ktime_t next = KTIME_MAX;
|
|
+
|
|
+ trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
|
|
+
|
|
+ /*
|
|
+ * Mark reaper as "not pending". This is done before checking any
|
|
+ * requests to avoid lost-update type problems.
|
|
+ */
|
|
+ spin_lock(&rtl->rtx_timeout.lock);
|
|
+ rtl->rtx_timeout.expires = KTIME_MAX;
|
|
+ spin_unlock(&rtl->rtx_timeout.lock);
|
|
+
|
|
+ spin_lock(&rtl->pending.lock);
|
|
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
|
|
+ ktime_t expires = ssh_request_get_expiration(r, timeout);
|
|
+
|
|
+ /*
|
|
+ * Check if the timeout hasn't expired yet. Find out next
|
|
+ * expiration date to be handled after this run.
|
|
+ */
|
|
+ if (ktime_after(expires, now)) {
|
|
+ next = ktime_before(expires, next) ? expires : next;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Avoid further transitions if locked. */
|
|
+ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * We have now marked the packet as locked. Thus it cannot be
|
|
+ * added to the pending or queued lists again after we've
|
|
+ * removed it here. We can therefore re-use the node of this
|
|
+ * packet temporarily.
|
|
+ */
|
|
+
|
|
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
|
|
+
|
|
+ atomic_dec(&rtl->pending.count);
|
|
+ list_del(&r->node);
|
|
+
|
|
+ list_add_tail(&r->node, &claimed);
|
|
+ }
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+
|
|
+ /* Cancel and complete the request. */
|
|
+ list_for_each_entry_safe(r, n, &claimed, node) {
|
|
+ trace_ssam_request_timeout(r);
|
|
+
|
|
+ /*
|
|
+ * At this point we've removed the packet from pending. This
|
|
+ * means that we've obtained the last (only) reference of the
|
|
+ * system to it. Thus we can just complete it.
|
|
+ */
|
|
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ ssh_rtl_complete_with_status(r, -ETIMEDOUT);
|
|
+
|
|
+ /*
|
|
+ * Drop the reference we've obtained by removing it from the
|
|
+ * pending set.
|
|
+ */
|
|
+ list_del(&r->node);
|
|
+ ssh_request_put(r);
|
|
+ }
|
|
+
|
|
+ /* Ensure that the reaper doesn't run again immediately. */
|
|
+ next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
|
|
+ if (next != KTIME_MAX)
|
|
+ ssh_rtl_timeout_reaper_mod(rtl, now, next);
|
|
+
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data)
|
|
+{
|
|
+ trace_ssam_rx_event_received(cmd, data->len);
|
|
+
|
|
+ rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
|
|
+ get_unaligned_le16(&cmd->rqid));
|
|
+
|
|
+ rtl->ops.handle_event(rtl, cmd, data);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
|
|
+{
|
|
+ struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
|
|
+ struct device *dev = &p->serdev->dev;
|
|
+ struct ssh_command *command;
|
|
+ struct ssam_span command_data;
|
|
+
|
|
+ if (sshp_parse_command(dev, data, &command, &command_data))
|
|
+ return;
|
|
+
|
|
+ if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
|
|
+ ssh_rtl_rx_event(rtl, command, &command_data);
|
|
+ else
|
|
+ ssh_rtl_complete(rtl, command, &command_data);
|
|
+}
|
|
+
|
|
+static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
|
|
+{
|
|
+ if (!data->len) {
|
|
+ ptl_err(p, "rtl: rx: no data frame payload\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ switch (data->ptr[0]) {
|
|
+ case SSH_PLD_TYPE_CMD:
|
|
+ ssh_rtl_rx_command(p, data);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
|
|
+ data->ptr[0]);
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void ssh_rtl_packet_release(struct ssh_packet *p)
|
|
+{
|
|
+ struct ssh_request *rqst;
|
|
+
|
|
+ rqst = to_ssh_request(p);
|
|
+ rqst->ops->release(rqst);
|
|
+}
|
|
+
|
|
+static const struct ssh_packet_ops ssh_rtl_packet_ops = {
|
|
+ .complete = ssh_rtl_packet_callback,
|
|
+ .release = ssh_rtl_packet_release,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * ssh_request_init() - Initialize SSH request.
|
|
+ * @rqst: The request to initialize.
|
|
+ * @flags: Request flags, determining the type of the request.
|
|
+ * @ops: Request operations.
|
|
+ *
|
|
+ * Initializes the given SSH request and underlying packet. Sets the message
|
|
+ * buffer pointer to %NULL and the message buffer length to zero. This buffer
|
|
+ * has to be set separately via ssh_request_set_data() before submission and
|
|
+ * must contain a valid SSH request message.
|
|
+ *
|
|
+ * Return: Returns zero on success or %-EINVAL if the given flags are invalid.
|
|
+ */
|
|
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
|
|
+ const struct ssh_request_ops *ops)
|
|
+{
|
|
+ unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
|
|
+
|
|
+ /* Unsequenced requests cannot have a response. */
|
|
+ if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (!(flags & SSAM_REQUEST_UNSEQUENCED))
|
|
+ type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
|
|
+
|
|
+ ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
|
|
+ &ssh_rtl_packet_ops);
|
|
+
|
|
+ INIT_LIST_HEAD(&rqst->node);
|
|
+
|
|
+ rqst->state = 0;
|
|
+ if (flags & SSAM_REQUEST_HAS_RESPONSE)
|
|
+ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
|
|
+
|
|
+ rqst->timestamp = KTIME_MAX;
|
|
+ rqst->ops = ops;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_init() - Initialize request transport layer.
|
|
+ * @rtl: The request transport layer to initialize.
|
|
+ * @serdev: The underlying serial device, i.e. the lower-level transport.
|
|
+ * @ops: Request transport layer operations.
|
|
+ *
|
|
+ * Initializes the given request transport layer and associated packet
|
|
+ * transport layer. Transmitter and receiver threads must be started
|
|
+ * separately via ssh_rtl_start(), after the request-layer has been
|
|
+ * initialized and the lower-level serial device layer has been set up.
|
|
+ *
|
|
+ * Return: Returns zero on success and a nonzero error code on failure.
|
|
+ */
|
|
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
|
|
+ const struct ssh_rtl_ops *ops)
|
|
+{
|
|
+ struct ssh_ptl_ops ptl_ops;
|
|
+ int status;
|
|
+
|
|
+ ptl_ops.data_received = ssh_rtl_rx_data;
|
|
+
|
|
+ status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ spin_lock_init(&rtl->queue.lock);
|
|
+ INIT_LIST_HEAD(&rtl->queue.head);
|
|
+
|
|
+ spin_lock_init(&rtl->pending.lock);
|
|
+ INIT_LIST_HEAD(&rtl->pending.head);
|
|
+ atomic_set_release(&rtl->pending.count, 0);
|
|
+
|
|
+ INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
|
|
+
|
|
+ spin_lock_init(&rtl->rtx_timeout.lock);
|
|
+ rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
|
|
+ rtl->rtx_timeout.expires = KTIME_MAX;
|
|
+ INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
|
|
+
|
|
+ rtl->ops = *ops;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_destroy() - Deinitialize request transport layer.
|
|
+ * @rtl: The request transport layer to deinitialize.
|
|
+ *
|
|
+ * Deinitializes the given request transport layer and frees resources
|
|
+ * associated with it. If receiver and/or transmitter threads have been
|
|
+ * started, the layer must first be shut down via ssh_rtl_shutdown() before
|
|
+ * this function can be called.
|
|
+ */
|
|
+void ssh_rtl_destroy(struct ssh_rtl *rtl)
|
|
+{
|
|
+ ssh_ptl_destroy(&rtl->ptl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_start() - Start request transmitter and receiver.
|
|
+ * @rtl: The request transport layer.
|
|
+ *
|
|
+ * Return: Returns zero on success, a negative error code on failure.
|
|
+ */
|
|
+int ssh_rtl_start(struct ssh_rtl *rtl)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssh_ptl_tx_start(&rtl->ptl);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ssh_rtl_tx_schedule(rtl);
|
|
+
|
|
+ status = ssh_ptl_rx_start(&rtl->ptl);
|
|
+ if (status) {
|
|
+ ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
|
|
+ ssh_ptl_tx_stop(&rtl->ptl);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct ssh_flush_request {
|
|
+ struct ssh_request base;
|
|
+ struct completion completion;
|
|
+ int status;
|
|
+};
|
|
+
|
|
+static void ssh_rtl_flush_request_complete(struct ssh_request *r,
|
|
+ const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data,
|
|
+ int status)
|
|
+{
|
|
+ struct ssh_flush_request *rqst;
|
|
+
|
|
+ rqst = container_of(r, struct ssh_flush_request, base);
|
|
+ rqst->status = status;
|
|
+}
|
|
+
|
|
+static void ssh_rtl_flush_request_release(struct ssh_request *r)
|
|
+{
|
|
+ struct ssh_flush_request *rqst;
|
|
+
|
|
+ rqst = container_of(r, struct ssh_flush_request, base);
|
|
+ complete_all(&rqst->completion);
|
|
+}
|
|
+
|
|
+static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
|
|
+ .complete = ssh_rtl_flush_request_complete,
|
|
+ .release = ssh_rtl_flush_request_release,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_flush() - Flush the request transport layer.
|
|
+ * @rtl: request transport layer
|
|
+ * @timeout: timeout for the flush operation in jiffies
|
|
+ *
|
|
+ * Queue a special flush request and wait for its completion. This request
|
|
+ * will be completed after all other currently queued and pending requests
|
|
+ * have been completed. Instead of a normal data packet, this request submits
|
|
+ * a special flush packet, meaning that upon completion, also the underlying
|
|
+ * packet transport layer has been flushed.
|
|
+ *
|
|
+ * Flushing the request layer guarantees that all previously submitted
|
|
+ * requests have been fully completed before this call returns. Additionally,
|
|
+ * flushing blocks execution of all later submitted requests until the flush
|
|
+ * has been completed.
|
|
+ *
|
|
+ * If the caller ensures that no new requests are submitted after a call to
|
|
+ * this function, the request transport layer is guaranteed to have no
|
|
+ * remaining requests when this call returns. The same guarantee does not hold
|
|
+ * for the packet layer, on which control packets may still be queued after
|
|
+ * this call.
|
|
+ *
|
|
+ * Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
|
|
+ * been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
|
|
+ * and/or request transport layer has been shut down before this call. May
|
|
+ * also return %-EINTR if the underlying packet transmission has been
|
|
+ * interrupted.
|
|
+ */
|
|
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
|
|
+{
|
|
+ const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
|
|
+ struct ssh_flush_request rqst;
|
|
+ int status;
|
|
+
|
|
+ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
|
|
+ rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
|
|
+ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
|
|
+ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
|
|
+
|
|
+ init_completion(&rqst.completion);
|
|
+
|
|
+ status = ssh_rtl_submit(rtl, &rqst.base);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ssh_request_put(&rqst.base);
|
|
+
|
|
+ if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
|
|
+ ssh_rtl_cancel(&rqst.base, true);
|
|
+ wait_for_completion(&rqst.completion);
|
|
+ }
|
|
+
|
|
+ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
|
|
+ rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
|
|
+
|
|
+ return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_shutdown() - Shut down request transport layer.
|
|
+ * @rtl: The request transport layer.
|
|
+ *
|
|
+ * Shuts down the request transport layer, removing and canceling all queued
|
|
+ * and pending requests. Requests canceled by this operation will be completed
|
|
+ * with %-ESHUTDOWN as status. Receiver and transmitter threads will be
|
|
+ * stopped, the lower-level packet layer will be shutdown.
|
|
+ *
|
|
+ * As a result of this function, the transport layer will be marked as shut
|
|
+ * down. Submission of requests after the transport layer has been shut down
|
|
+ * will fail with %-ESHUTDOWN.
|
|
+ */
|
|
+void ssh_rtl_shutdown(struct ssh_rtl *rtl)
|
|
+{
|
|
+ struct ssh_request *r, *n;
|
|
+ LIST_HEAD(claimed);
|
|
+ int pending;
|
|
+
|
|
+ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
|
|
+ /*
|
|
+ * Ensure that the layer gets marked as shut-down before actually
|
|
+ * stopping it. In combination with the check in ssh_rtl_submit(),
|
|
+ * this guarantees that no new requests can be added and all already
|
|
+ * queued requests are properly canceled.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ /* Remove requests from queue. */
|
|
+ spin_lock(&rtl->queue.lock);
|
|
+ list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
|
|
+ /* Ensure state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
|
|
+
|
|
+ list_del(&r->node);
|
|
+ list_add_tail(&r->node, &claimed);
|
|
+ }
|
|
+ spin_unlock(&rtl->queue.lock);
|
|
+
|
|
+ /*
|
|
+ * We have now guaranteed that the queue is empty and no more new
|
|
+ * requests can be submitted (i.e. it will stay empty). This means that
|
|
+ * calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
|
|
+ * we can simply call cancel_work_sync() on tx.work here and when that
|
|
+ * returns, we've locked it down. This also means that after this call,
|
|
+ * we don't submit any more packets to the underlying packet layer, so
|
|
+ * we can also shut that down.
|
|
+ */
|
|
+
|
|
+ cancel_work_sync(&rtl->tx.work);
|
|
+ ssh_ptl_shutdown(&rtl->ptl);
|
|
+ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
|
|
+
|
|
+ /*
|
|
+ * Shutting down the packet layer should also have canceled all
|
|
+ * requests. Thus the pending set should be empty. Attempt to handle
|
|
+ * this gracefully anyways, even though this should be dead code.
|
|
+ */
|
|
+
|
|
+ pending = atomic_read(&rtl->pending.count);
|
|
+ if (WARN_ON(pending)) {
|
|
+ spin_lock(&rtl->pending.lock);
|
|
+ list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
|
|
+ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
|
|
+ /* Ensure state never gets zero. */
|
|
+ smp_mb__before_atomic();
|
|
+ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
|
|
+
|
|
+ list_del(&r->node);
|
|
+ list_add_tail(&r->node, &claimed);
|
|
+ }
|
|
+ spin_unlock(&rtl->pending.lock);
|
|
+ }
|
|
+
|
|
+ /* Finally, cancel and complete the requests we claimed before. */
|
|
+ list_for_each_entry_safe(r, n, &claimed, node) {
|
|
+ /*
|
|
+ * We need test_and_set() because we still might compete with
|
|
+ * cancellation.
|
|
+ */
|
|
+ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
|
|
+ ssh_rtl_complete_with_status(r, -ESHUTDOWN);
|
|
+
|
|
+ /*
|
|
+ * Drop the reference we've obtained by removing it from the
|
|
+ * lists.
|
|
+ */
|
|
+ list_del(&r->node);
|
|
+ ssh_request_put(r);
|
|
+ }
|
|
+}
|
|
diff --git a/drivers/platform/x86/surface_aggregator/ssh_request_layer.h b/drivers/platform/x86/surface_aggregator/ssh_request_layer.h
|
|
new file mode 100644
|
|
index 000000000000..9c3cbae2d4bd
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/ssh_request_layer.h
|
|
@@ -0,0 +1,143 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * SSH request transport layer.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
|
|
+#define _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H
|
|
+
|
|
+#include <linux/atomic.h>
|
|
+#include <linux/ktime.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+#include "ssh_packet_layer.h"
|
|
+
|
|
+/**
|
|
+ * enum ssh_rtl_state_flags - State-flags for &struct ssh_rtl.
|
|
+ *
|
|
+ * @SSH_RTL_SF_SHUTDOWN_BIT:
|
|
+ * Indicates that the request transport layer has been shut down or is
|
|
+ * being shut down and should not accept any new requests.
|
|
+ */
|
|
+enum ssh_rtl_state_flags {
|
|
+ SSH_RTL_SF_SHUTDOWN_BIT,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_rtl_ops - Callback operations for request transport layer.
|
|
+ * @handle_event: Function called when a SSH event has been received. The
|
|
+ * specified function takes the request layer, received command
|
|
+ * struct, and corresponding payload as arguments. If the event
|
|
+ * has no payload, the payload span is empty (not %NULL).
|
|
+ */
|
|
+struct ssh_rtl_ops {
|
|
+ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_rtl - SSH request transport layer.
|
|
+ * @ptl: Underlying packet transport layer.
|
|
+ * @state: State(-flags) of the transport layer.
|
|
+ * @queue: Request submission queue.
|
|
+ * @queue.lock: Lock for modifying the request submission queue.
|
|
+ * @queue.head: List-head of the request submission queue.
|
|
+ * @pending: Set/list of pending requests.
|
|
+ * @pending.lock: Lock for modifying the request set.
|
|
+ * @pending.head: List-head of the pending set/list.
|
|
+ * @pending.count: Number of currently pending requests.
|
|
+ * @tx: Transmitter subsystem.
|
|
+ * @tx.work: Transmitter work item.
|
|
+ * @rtx_timeout: Retransmission timeout subsystem.
|
|
+ * @rtx_timeout.lock: Lock for modifying the retransmission timeout reaper.
|
|
+ * @rtx_timeout.timeout: Timeout interval for retransmission.
|
|
+ * @rtx_timeout.expires: Time specifying when the reaper work is next scheduled.
|
|
+ * @rtx_timeout.reaper: Work performing timeout checks and subsequent actions.
|
|
+ * @ops: Request layer operations.
|
|
+ */
|
|
+struct ssh_rtl {
|
|
+ struct ssh_ptl ptl;
|
|
+ unsigned long state;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ } queue;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ atomic_t count;
|
|
+ } pending;
|
|
+
|
|
+ struct {
|
|
+ struct work_struct work;
|
|
+ } tx;
|
|
+
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ ktime_t timeout;
|
|
+ ktime_t expires;
|
|
+ struct delayed_work reaper;
|
|
+ } rtx_timeout;
|
|
+
|
|
+ struct ssh_rtl_ops ops;
|
|
+};
|
|
+
|
|
+#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__)
|
|
+#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__)
|
|
+#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__)
|
|
+#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__)
|
|
+#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__)
|
|
+
|
|
+#define to_ssh_rtl(ptr, member) \
|
|
+ container_of(ptr, struct ssh_rtl, member)
|
|
+
|
|
+/**
|
|
+ * ssh_rtl_get_device() - Get device associated with request transport layer.
|
|
+ * @rtl: The request transport layer.
|
|
+ *
|
|
+ * Return: Returns the device on which the given request transport layer
|
|
+ * builds upon.
|
|
+ */
|
|
+static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl)
|
|
+{
|
|
+ return ssh_ptl_get_device(&rtl->ptl);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_request_rtl() - Get request transport layer associated with request.
|
|
+ * @rqst: The request to get the request transport layer reference for.
|
|
+ *
|
|
+ * Return: Returns the &struct ssh_rtl associated with the given SSH request.
|
|
+ */
|
|
+static inline struct ssh_rtl *ssh_request_rtl(struct ssh_request *rqst)
|
|
+{
|
|
+ struct ssh_ptl *ptl;
|
|
+
|
|
+ ptl = READ_ONCE(rqst->packet.ptl);
|
|
+ return likely(ptl) ? to_ssh_rtl(ptl, ptl) : NULL;
|
|
+}
|
|
+
|
|
+int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst);
|
|
+bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);
|
|
+
|
|
+int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
|
|
+ const struct ssh_rtl_ops *ops);
|
|
+
|
|
+int ssh_rtl_start(struct ssh_rtl *rtl);
|
|
+int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout);
|
|
+void ssh_rtl_shutdown(struct ssh_rtl *rtl);
|
|
+void ssh_rtl_destroy(struct ssh_rtl *rtl);
|
|
+
|
|
+int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
|
|
+ const struct ssh_request_ops *ops);
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_SSH_REQUEST_LAYER_H */
|
|
diff --git a/drivers/platform/x86/surface_aggregator/trace.h b/drivers/platform/x86/surface_aggregator/trace.h
|
|
new file mode 100644
|
|
index 000000000000..de64cf169060
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator/trace.h
|
|
@@ -0,0 +1,632 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Trace points for SSAM/SSH.
|
|
+ *
|
|
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#undef TRACE_SYSTEM
|
|
+#define TRACE_SYSTEM surface_aggregator
|
|
+
|
|
+#if !defined(_SURFACE_AGGREGATOR_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
|
|
+#define _SURFACE_AGGREGATOR_TRACE_H
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/tracepoint.h>
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ);
|
|
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ);
|
|
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK);
|
|
+TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_SF_MASK);
|
|
+TRACE_DEFINE_ENUM(SSH_PACKET_FLAGS_TY_MASK);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK);
|
|
+TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK);
|
|
+
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD);
|
|
+TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG);
|
|
+
|
|
+#define SSAM_PTR_UID_LEN 9
|
|
+#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1)
|
|
+#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
|
|
+#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
|
|
+#define SSAM_SSH_TC_NOT_APPLICABLE 0
|
|
+
|
|
+#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
|
|
+#define _SURFACE_AGGREGATOR_TRACE_HELPERS
|
|
+
|
|
+/**
|
|
+ * ssam_trace_ptr_uid() - Convert the pointer to a non-pointer UID string.
|
|
+ * @ptr: The pointer to convert.
|
|
+ * @uid_str: A buffer of length SSAM_PTR_UID_LEN where the UID will be stored.
|
|
+ *
|
|
+ * Converts the given pointer into a UID string that is safe to be shared
|
|
+ * with userspace and logs, i.e. doesn't give away the real memory location.
|
|
+ */
|
|
+static inline void ssam_trace_ptr_uid(const void *ptr, char *uid_str)
|
|
+{
|
|
+ char buf[2 * sizeof(void *) + 1];
|
|
+
|
|
+ BUILD_BUG_ON(ARRAY_SIZE(buf) < SSAM_PTR_UID_LEN);
|
|
+
|
|
+ snprintf(buf, ARRAY_SIZE(buf), "%p", ptr);
|
|
+ memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN],
|
|
+ SSAM_PTR_UID_LEN);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_trace_get_packet_seq() - Read the packet's sequence ID.
|
|
+ * @p: The packet.
|
|
+ *
|
|
+ * Return: Returns the packet's sequence ID (SEQ) field if present, or
|
|
+ * %SSAM_SEQ_NOT_APPLICABLE if not (e.g. flush packet).
|
|
+ */
|
|
+static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p)
|
|
+{
|
|
+ if (!p->data.ptr || p->data.len < SSH_MESSAGE_LENGTH(0))
|
|
+ return SSAM_SEQ_NOT_APPLICABLE;
|
|
+
|
|
+ return p->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_trace_get_request_id() - Read the packet's request ID.
|
|
+ * @p: The packet.
|
|
+ *
|
|
+ * Return: Returns the packet's request ID (RQID) field if the packet
|
|
+ * represents a request with command data, or %SSAM_RQID_NOT_APPLICABLE if not
|
|
+ * (e.g. flush request, control packet).
|
|
+ */
|
|
+static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
|
|
+{
|
|
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
|
|
+ return SSAM_RQID_NOT_APPLICABLE;
|
|
+
|
|
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(rqid)]);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_trace_get_request_tc() - Read the packet's request target category.
|
|
+ * @p: The packet.
|
|
+ *
|
|
+ * Return: Returns the packet's request target category (TC) field if the
|
|
+ * packet represents a request with command data, or %SSAM_TC_NOT_APPLICABLE
|
|
+ * if not (e.g. flush request, control packet).
|
|
+ */
|
|
+static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
|
|
+{
|
|
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
|
|
+ return SSAM_SSH_TC_NOT_APPLICABLE;
|
|
+
|
|
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tc)]);
|
|
+}
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_TRACE_HELPERS */
|
|
+
|
|
+#define ssam_trace_get_command_field_u8(packet, field) \
|
|
+ ((!(packet) || (packet)->data.len < SSH_COMMAND_MESSAGE_LENGTH(0)) \
|
|
+ ? 0 : (packet)->data.ptr[SSH_MSGOFFSET_COMMAND(field)])
|
|
+
|
|
+#define ssam_show_generic_u8_field(value) \
|
|
+ __print_symbolic(value, \
|
|
+ { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_frame_type(ty) \
|
|
+ __print_symbolic(ty, \
|
|
+ { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \
|
|
+ { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \
|
|
+ { SSH_FRAME_TYPE_ACK, "ACK" }, \
|
|
+ { SSH_FRAME_TYPE_NAK, "NAK" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_packet_type(type) \
|
|
+ __print_flags(flags & SSH_PACKET_FLAGS_TY_MASK, "", \
|
|
+ { BIT(SSH_PACKET_TY_FLUSH_BIT), "F" }, \
|
|
+ { BIT(SSH_PACKET_TY_SEQUENCED_BIT), "S" }, \
|
|
+ { BIT(SSH_PACKET_TY_BLOCKING_BIT), "B" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_packet_state(state) \
|
|
+ __print_flags(flags & SSH_PACKET_FLAGS_SF_MASK, "", \
|
|
+ { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \
|
|
+ { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \
|
|
+ { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \
|
|
+ { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \
|
|
+ { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \
|
|
+ { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \
|
|
+ { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \
|
|
+ { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_packet_seq(seq) \
|
|
+ __print_symbolic(seq, \
|
|
+ { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_request_type(flags) \
|
|
+ __print_flags((flags) & SSH_REQUEST_FLAGS_TY_MASK, "", \
|
|
+ { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \
|
|
+ { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_request_state(flags) \
|
|
+ __print_flags((flags) & SSH_REQUEST_FLAGS_SF_MASK, "", \
|
|
+ { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \
|
|
+ { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \
|
|
+ { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \
|
|
+ { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \
|
|
+ { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \
|
|
+ { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \
|
|
+ { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \
|
|
+ { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_request_id(rqid) \
|
|
+ __print_symbolic(rqid, \
|
|
+ { SSAM_RQID_NOT_APPLICABLE, "N/A" } \
|
|
+ )
|
|
+
|
|
+#define ssam_show_ssh_tc(rqid) \
|
|
+ __print_symbolic(rqid, \
|
|
+ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
|
|
+ { SSAM_SSH_TC_SAM, "SAM" }, \
|
|
+ { SSAM_SSH_TC_BAT, "BAT" }, \
|
|
+ { SSAM_SSH_TC_TMP, "TMP" }, \
|
|
+ { SSAM_SSH_TC_PMC, "PMC" }, \
|
|
+ { SSAM_SSH_TC_FAN, "FAN" }, \
|
|
+ { SSAM_SSH_TC_PoM, "PoM" }, \
|
|
+ { SSAM_SSH_TC_DBG, "DBG" }, \
|
|
+ { SSAM_SSH_TC_KBD, "KBD" }, \
|
|
+ { SSAM_SSH_TC_FWU, "FWU" }, \
|
|
+ { SSAM_SSH_TC_UNI, "UNI" }, \
|
|
+ { SSAM_SSH_TC_LPC, "LPC" }, \
|
|
+ { SSAM_SSH_TC_TCL, "TCL" }, \
|
|
+ { SSAM_SSH_TC_SFL, "SFL" }, \
|
|
+ { SSAM_SSH_TC_KIP, "KIP" }, \
|
|
+ { SSAM_SSH_TC_EXT, "EXT" }, \
|
|
+ { SSAM_SSH_TC_BLD, "BLD" }, \
|
|
+ { SSAM_SSH_TC_BAS, "BAS" }, \
|
|
+ { SSAM_SSH_TC_SEN, "SEN" }, \
|
|
+ { SSAM_SSH_TC_SRQ, "SRQ" }, \
|
|
+ { SSAM_SSH_TC_MCU, "MCU" }, \
|
|
+ { SSAM_SSH_TC_HID, "HID" }, \
|
|
+ { SSAM_SSH_TC_TCH, "TCH" }, \
|
|
+ { SSAM_SSH_TC_BKL, "BKL" }, \
|
|
+ { SSAM_SSH_TC_TAM, "TAM" }, \
|
|
+ { SSAM_SSH_TC_ACC, "ACC" }, \
|
|
+ { SSAM_SSH_TC_UFI, "UFI" }, \
|
|
+ { SSAM_SSH_TC_USC, "USC" }, \
|
|
+ { SSAM_SSH_TC_PEN, "PEN" }, \
|
|
+ { SSAM_SSH_TC_VID, "VID" }, \
|
|
+ { SSAM_SSH_TC_AUD, "AUD" }, \
|
|
+ { SSAM_SSH_TC_SMC, "SMC" }, \
|
|
+ { SSAM_SSH_TC_KPD, "KPD" }, \
|
|
+ { SSAM_SSH_TC_REG, "REG" } \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_frame_class,
|
|
+ TP_PROTO(const struct ssh_frame *frame),
|
|
+
|
|
+ TP_ARGS(frame),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(u8, type)
|
|
+ __field(u8, seq)
|
|
+ __field(u16, len)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->type = frame->type;
|
|
+ __entry->seq = frame->seq;
|
|
+ __entry->len = get_unaligned_le16(&frame->len);
|
|
+ ),
|
|
+
|
|
+ TP_printk("ty=%s, seq=%#04x, len=%u",
|
|
+ ssam_show_frame_type(__entry->type),
|
|
+ __entry->seq,
|
|
+ __entry->len
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_FRAME_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_frame_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_frame *frame), \
|
|
+ TP_ARGS(frame) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_command_class,
|
|
+ TP_PROTO(const struct ssh_command *cmd, u16 len),
|
|
+
|
|
+ TP_ARGS(cmd, len),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(u16, rqid)
|
|
+ __field(u16, len)
|
|
+ __field(u8, tc)
|
|
+ __field(u8, cid)
|
|
+ __field(u8, iid)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->rqid = get_unaligned_le16(&cmd->rqid);
|
|
+ __entry->tc = cmd->tc;
|
|
+ __entry->cid = cmd->cid;
|
|
+ __entry->iid = cmd->iid;
|
|
+ __entry->len = len;
|
|
+ ),
|
|
+
|
|
+ TP_printk("rqid=%#06x, tc=%s, cid=%#04x, iid=%#04x, len=%u",
|
|
+ __entry->rqid,
|
|
+ ssam_show_ssh_tc(__entry->tc),
|
|
+ __entry->cid,
|
|
+ __entry->iid,
|
|
+ __entry->len
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_COMMAND_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_command_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_command *cmd, u16 len), \
|
|
+ TP_ARGS(cmd, len) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_packet_class,
|
|
+ TP_PROTO(const struct ssh_packet *packet),
|
|
+
|
|
+ TP_ARGS(packet),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(unsigned long, state)
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ __field(u8, priority)
|
|
+ __field(u16, length)
|
|
+ __field(u16, seq)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->state = READ_ONCE(packet->state);
|
|
+ ssam_trace_ptr_uid(packet, __entry->uid);
|
|
+ __entry->priority = READ_ONCE(packet->priority);
|
|
+ __entry->length = packet->data.len;
|
|
+ __entry->seq = ssam_trace_get_packet_seq(packet);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s",
|
|
+ __entry->uid,
|
|
+ ssam_show_packet_seq(__entry->seq),
|
|
+ ssam_show_packet_type(__entry->state),
|
|
+ __entry->priority,
|
|
+ __entry->length,
|
|
+ ssam_show_packet_state(__entry->state)
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_PACKET_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_packet_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_packet *packet), \
|
|
+ TP_ARGS(packet) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_packet_status_class,
|
|
+ TP_PROTO(const struct ssh_packet *packet, int status),
|
|
+
|
|
+ TP_ARGS(packet, status),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(unsigned long, state)
|
|
+ __field(int, status)
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ __field(u8, priority)
|
|
+ __field(u16, length)
|
|
+ __field(u16, seq)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->state = READ_ONCE(packet->state);
|
|
+ __entry->status = status;
|
|
+ ssam_trace_ptr_uid(packet, __entry->uid);
|
|
+ __entry->priority = READ_ONCE(packet->priority);
|
|
+ __entry->length = packet->data.len;
|
|
+ __entry->seq = ssam_trace_get_packet_seq(packet);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s, seq=%s, ty=%s, pri=%#04x, len=%u, sta=%s, status=%d",
|
|
+ __entry->uid,
|
|
+ ssam_show_packet_seq(__entry->seq),
|
|
+ ssam_show_packet_type(__entry->state),
|
|
+ __entry->priority,
|
|
+ __entry->length,
|
|
+ ssam_show_packet_state(__entry->state),
|
|
+ __entry->status
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_packet *packet, int status), \
|
|
+ TP_ARGS(packet, status) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_request_class,
|
|
+ TP_PROTO(const struct ssh_request *request),
|
|
+
|
|
+ TP_ARGS(request),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(unsigned long, state)
|
|
+ __field(u32, rqid)
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ __field(u8, tc)
|
|
+ __field(u16, cid)
|
|
+ __field(u16, iid)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ const struct ssh_packet *p = &request->packet;
|
|
+
|
|
+ /* Use packet for UID so we can match requests to packets. */
|
|
+ __entry->state = READ_ONCE(request->state);
|
|
+ __entry->rqid = ssam_trace_get_request_id(p);
|
|
+ ssam_trace_ptr_uid(p, __entry->uid);
|
|
+ __entry->tc = ssam_trace_get_request_tc(p);
|
|
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
|
|
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
|
|
+ __entry->uid,
|
|
+ ssam_show_request_id(__entry->rqid),
|
|
+ ssam_show_request_type(__entry->state),
|
|
+ ssam_show_request_state(__entry->state),
|
|
+ ssam_show_ssh_tc(__entry->tc),
|
|
+ ssam_show_generic_u8_field(__entry->cid),
|
|
+ ssam_show_generic_u8_field(__entry->iid)
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_REQUEST_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_request_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_request *request), \
|
|
+ TP_ARGS(request) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_request_status_class,
|
|
+ TP_PROTO(const struct ssh_request *request, int status),
|
|
+
|
|
+ TP_ARGS(request, status),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(unsigned long, state)
|
|
+ __field(u32, rqid)
|
|
+ __field(int, status)
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ __field(u8, tc)
|
|
+ __field(u16, cid)
|
|
+ __field(u16, iid)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ const struct ssh_packet *p = &request->packet;
|
|
+
|
|
+ /* Use packet for UID so we can match requests to packets. */
|
|
+ __entry->state = READ_ONCE(request->state);
|
|
+ __entry->rqid = ssam_trace_get_request_id(p);
|
|
+ __entry->status = status;
|
|
+ ssam_trace_ptr_uid(p, __entry->uid);
|
|
+ __entry->tc = ssam_trace_get_request_tc(p);
|
|
+ __entry->cid = ssam_trace_get_command_field_u8(p, cid);
|
|
+ __entry->iid = ssam_trace_get_command_field_u8(p, iid);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
|
|
+ __entry->uid,
|
|
+ ssam_show_request_id(__entry->rqid),
|
|
+ ssam_show_request_type(__entry->state),
|
|
+ ssam_show_request_state(__entry->state),
|
|
+ ssam_show_ssh_tc(__entry->tc),
|
|
+ ssam_show_generic_u8_field(__entry->cid),
|
|
+ ssam_show_generic_u8_field(__entry->iid),
|
|
+ __entry->status
|
|
+ )
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_request_status_class, ssam_##name, \
|
|
+ TP_PROTO(const struct ssh_request *request, int status),\
|
|
+ TP_ARGS(request, status) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_alloc_class,
|
|
+ TP_PROTO(void *ptr, size_t len),
|
|
+
|
|
+ TP_ARGS(ptr, len),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(size_t, len)
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->len = len;
|
|
+ ssam_trace_ptr_uid(ptr, __entry->uid);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s, len=%zu", __entry->uid, __entry->len)
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_ALLOC_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_alloc_class, ssam_##name, \
|
|
+ TP_PROTO(void *ptr, size_t len), \
|
|
+ TP_ARGS(ptr, len) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_free_class,
|
|
+ TP_PROTO(void *ptr),
|
|
+
|
|
+ TP_ARGS(ptr),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __array(char, uid, SSAM_PTR_UID_LEN)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ ssam_trace_ptr_uid(ptr, __entry->uid);
|
|
+ ),
|
|
+
|
|
+ TP_printk("uid=%s", __entry->uid)
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_FREE_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_free_class, ssam_##name, \
|
|
+ TP_PROTO(void *ptr), \
|
|
+ TP_ARGS(ptr) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_pending_class,
|
|
+ TP_PROTO(unsigned int pending),
|
|
+
|
|
+ TP_ARGS(pending),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(unsigned int, pending)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->pending = pending;
|
|
+ ),
|
|
+
|
|
+ TP_printk("pending=%u", __entry->pending)
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_PENDING_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_pending_class, ssam_##name, \
|
|
+ TP_PROTO(unsigned int pending), \
|
|
+ TP_ARGS(pending) \
|
|
+ )
|
|
+
|
|
+DECLARE_EVENT_CLASS(ssam_data_class,
|
|
+ TP_PROTO(size_t length),
|
|
+
|
|
+ TP_ARGS(length),
|
|
+
|
|
+ TP_STRUCT__entry(
|
|
+ __field(size_t, length)
|
|
+ ),
|
|
+
|
|
+ TP_fast_assign(
|
|
+ __entry->length = length;
|
|
+ ),
|
|
+
|
|
+ TP_printk("length=%zu", __entry->length)
|
|
+);
|
|
+
|
|
+#define DEFINE_SSAM_DATA_EVENT(name) \
|
|
+ DEFINE_EVENT(ssam_data_class, ssam_##name, \
|
|
+ TP_PROTO(size_t length), \
|
|
+ TP_ARGS(length) \
|
|
+ )
|
|
+
|
|
+DEFINE_SSAM_FRAME_EVENT(rx_frame_received);
|
|
+DEFINE_SSAM_COMMAND_EVENT(rx_response_received);
|
|
+DEFINE_SSAM_COMMAND_EVENT(rx_event_received);
|
|
+
|
|
+DEFINE_SSAM_PACKET_EVENT(packet_release);
|
|
+DEFINE_SSAM_PACKET_EVENT(packet_submit);
|
|
+DEFINE_SSAM_PACKET_EVENT(packet_resubmit);
|
|
+DEFINE_SSAM_PACKET_EVENT(packet_timeout);
|
|
+DEFINE_SSAM_PACKET_EVENT(packet_cancel);
|
|
+DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete);
|
|
+DEFINE_SSAM_PENDING_EVENT(ptl_timeout_reap);
|
|
+
|
|
+DEFINE_SSAM_REQUEST_EVENT(request_submit);
|
|
+DEFINE_SSAM_REQUEST_EVENT(request_timeout);
|
|
+DEFINE_SSAM_REQUEST_EVENT(request_cancel);
|
|
+DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete);
|
|
+DEFINE_SSAM_PENDING_EVENT(rtl_timeout_reap);
|
|
+
|
|
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet);
|
|
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet);
|
|
+DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet);
|
|
+DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write);
|
|
+DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data);
|
|
+DEFINE_SSAM_DATA_EVENT(ei_rx_corrupt_syn);
|
|
+DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data);
|
|
+DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response);
|
|
+
|
|
+DEFINE_SSAM_ALLOC_EVENT(ctrl_packet_alloc);
|
|
+DEFINE_SSAM_FREE_EVENT(ctrl_packet_free);
|
|
+
|
|
+DEFINE_SSAM_ALLOC_EVENT(event_item_alloc);
|
|
+DEFINE_SSAM_FREE_EVENT(event_item_free);
|
|
+
|
|
+#endif /* _SURFACE_AGGREGATOR_TRACE_H */
|
|
+
|
|
+/* This part must be outside protection */
|
|
+#undef TRACE_INCLUDE_PATH
|
|
+#undef TRACE_INCLUDE_FILE
|
|
+
|
|
+#define TRACE_INCLUDE_PATH .
|
|
+#define TRACE_INCLUDE_FILE trace
|
|
+
|
|
+#include <trace/define_trace.h>
|
|
diff --git a/drivers/platform/x86/surface_aggregator_cdev.c b/drivers/platform/x86/surface_aggregator_cdev.c
|
|
new file mode 100644
|
|
index 000000000000..bbc0fc57ba13
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator_cdev.c
|
|
@@ -0,0 +1,810 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
|
|
+ * misc device. Intended for debugging and development.
|
|
+ *
|
|
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/fs.h>
|
|
+#include <linux/ioctl.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/kfifo.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/miscdevice.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/poll.h>
|
|
+#include <linux/rwsem.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/uaccess.h>
|
|
+#include <linux/vmalloc.h>
|
|
+
|
|
+#include <linux/surface_aggregator/cdev.h>
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
|
|
+
|
|
+
|
|
+/* -- Main structures. ------------------------------------------------------ */
|
|
+
|
|
+enum ssam_cdev_device_state {
|
|
+ SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
|
|
+};
|
|
+
|
|
+struct ssam_cdev {
|
|
+ struct kref kref;
|
|
+ struct rw_semaphore lock;
|
|
+
|
|
+ struct device *dev;
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct miscdevice mdev;
|
|
+ unsigned long flags;
|
|
+
|
|
+ struct rw_semaphore client_lock; /* Guards client list. */
|
|
+ struct list_head client_list;
|
|
+};
|
|
+
|
|
+struct ssam_cdev_client;
|
|
+
|
|
+struct ssam_cdev_notifier {
|
|
+ struct ssam_cdev_client *client;
|
|
+ struct ssam_event_notifier nf;
|
|
+};
|
|
+
|
|
+struct ssam_cdev_client {
|
|
+ struct ssam_cdev *cdev;
|
|
+ struct list_head node;
|
|
+
|
|
+ struct mutex notifier_lock; /* Guards notifier access for registration */
|
|
+ struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];
|
|
+
|
|
+ struct mutex read_lock; /* Guards FIFO buffer read access */
|
|
+ struct mutex write_lock; /* Guards FIFO buffer write access */
|
|
+ DECLARE_KFIFO(buffer, u8, 4096);
|
|
+
|
|
+ wait_queue_head_t waitq;
|
|
+ struct fasync_struct *fasync;
|
|
+};
|
|
+
|
|
+static void __ssam_cdev_release(struct kref *kref)
|
|
+{
|
|
+ kfree(container_of(kref, struct ssam_cdev, kref));
|
|
+}
|
|
+
|
|
+static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
|
|
+{
|
|
+ if (cdev)
|
|
+ kref_get(&cdev->kref);
|
|
+
|
|
+ return cdev;
|
|
+}
|
|
+
|
|
+static void ssam_cdev_put(struct ssam_cdev *cdev)
|
|
+{
|
|
+ if (cdev)
|
|
+ kref_put(&cdev->kref, __ssam_cdev_release);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Notifier handling. ---------------------------------------------------- */
|
|
+
|
|
+static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
|
|
+{
|
|
+ struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
|
|
+ struct ssam_cdev_client *client = cdev_nf->client;
|
|
+ struct ssam_cdev_event event;
|
|
+ size_t n = struct_size(&event, data, in->length);
|
|
+
|
|
+ /* Translate event. */
|
|
+ event.target_category = in->target_category;
|
|
+ event.target_id = in->target_id;
|
|
+ event.command_id = in->command_id;
|
|
+ event.instance_id = in->instance_id;
|
|
+ event.length = in->length;
|
|
+
|
|
+ mutex_lock(&client->write_lock);
|
|
+
|
|
+ /* Make sure we have enough space. */
|
|
+ if (kfifo_avail(&client->buffer) < n) {
|
|
+ dev_warn(client->cdev->dev,
|
|
+ "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
|
|
+ in->target_category, in->target_id, in->command_id, in->instance_id);
|
|
+ mutex_unlock(&client->write_lock);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ /* Copy event header and payload. */
|
|
+ kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
|
|
+ kfifo_in(&client->buffer, &in->data[0], in->length);
|
|
+
|
|
+ mutex_unlock(&client->write_lock);
|
|
+
|
|
+ /* Notify waiting readers. */
|
|
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
|
|
+ wake_up_interruptible(&client->waitq);
|
|
+
|
|
+ /*
|
|
+ * Don't mark events as handled, this is the job of a proper driver and
|
|
+ * not the debugging interface.
|
|
+ */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
|
|
+{
|
|
+ const u16 rqid = ssh_tc_to_rqid(tc);
|
|
+ const u16 event = ssh_rqid_to_event(rqid);
|
|
+ struct ssam_cdev_notifier *nf;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ /* Validate notifier target category. */
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&client->notifier_lock);
|
|
+
|
|
+ /* Check if the notifier has already been registered. */
|
|
+ if (client->notifier[event]) {
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+ return -EEXIST;
|
|
+ }
|
|
+
|
|
+ /* Allocate new notifier. */
|
|
+ nf = kzalloc(sizeof(*nf), GFP_KERNEL);
|
|
+ if (!nf) {
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Create a dummy notifier with the minimal required fields for
|
|
+ * observer registration. Note that we can skip fully specifying event
|
|
+ * and registry here as we do not need any matching and use silent
|
|
+ * registration, which does not enable the corresponding event.
|
|
+ */
|
|
+ nf->client = client;
|
|
+ nf->nf.base.fn = ssam_cdev_notifier;
|
|
+ nf->nf.base.priority = priority;
|
|
+ nf->nf.event.id.target_category = tc;
|
|
+ nf->nf.event.mask = 0; /* Do not do any matching. */
|
|
+ nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;
|
|
+
|
|
+ /* Register notifier. */
|
|
+ status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
|
|
+ if (status)
|
|
+ kfree(nf);
|
|
+ else
|
|
+ client->notifier[event] = nf;
|
|
+
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
|
|
+{
|
|
+ const u16 rqid = ssh_tc_to_rqid(tc);
|
|
+ const u16 event = ssh_rqid_to_event(rqid);
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ /* Validate notifier target category. */
|
|
+ if (!ssh_rqid_is_event(rqid))
|
|
+ return -EINVAL;
|
|
+
|
|
+ mutex_lock(&client->notifier_lock);
|
|
+
|
|
+ /* Check if the notifier is currently registered. */
|
|
+ if (!client->notifier[event]) {
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+ return -ENOENT;
|
|
+ }
|
|
+
|
|
+ /* Unregister and free notifier. */
|
|
+ status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
|
|
+ kfree(client->notifier[event]);
|
|
+ client->notifier[event] = NULL;
|
|
+
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ down_read(&client->cdev->lock);
|
|
+
|
|
+ /*
|
|
+ * This function may be used during shutdown, thus we need to test for
|
|
+ * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
|
|
+ */
|
|
+ if (client->cdev->ctrl) {
|
|
+ for (i = 0; i < SSH_NUM_EVENTS; i++)
|
|
+ ssam_cdev_notifier_unregister(client, i + 1);
|
|
+
|
|
+ } else {
|
|
+ int count = 0;
|
|
+
|
|
+ /*
|
|
+ * Device has been shut down. Any notifier remaining is a bug,
|
|
+ * so warn about that as this would otherwise hardly be
|
|
+ * noticeable. Nevertheless, free them as well.
|
|
+ */
|
|
+ mutex_lock(&client->notifier_lock);
|
|
+ for (i = 0; i < SSH_NUM_EVENTS; i++) {
|
|
+ count += !!(client->notifier[i]);
|
|
+ kfree(client->notifier[i]);
|
|
+ client->notifier[i] = NULL;
|
|
+ }
|
|
+ mutex_unlock(&client->notifier_lock);
|
|
+
|
|
+ WARN_ON(count > 0);
|
|
+ }
|
|
+
|
|
+ up_read(&client->cdev->lock);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- IOCTL functions. ------------------------------------------------------ */
|
|
+
|
|
+static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
|
|
+{
|
|
+ struct ssam_cdev_request rqst;
|
|
+ struct ssam_request spec = {};
|
|
+ struct ssam_response rsp = {};
|
|
+ const void __user *plddata;
|
|
+ void __user *rspdata;
|
|
+ int status = 0, ret = 0, tmp;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ ret = copy_from_user(&rqst, r, sizeof(struct ssam_cdev_request));
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ plddata = u64_to_user_ptr(rqst.payload.data);
|
|
+ rspdata = u64_to_user_ptr(rqst.response.data);
|
|
+
|
|
+ /* Setup basic request fields. */
|
|
+ spec.target_category = rqst.target_category;
|
|
+ spec.target_id = rqst.target_id;
|
|
+ spec.command_id = rqst.command_id;
|
|
+ spec.instance_id = rqst.instance_id;
|
|
+ spec.flags = 0;
|
|
+ spec.length = rqst.payload.length;
|
|
+ spec.payload = NULL;
|
|
+
|
|
+ if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
|
|
+ spec.flags |= SSAM_REQUEST_HAS_RESPONSE;
|
|
+
|
|
+ if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
|
|
+ spec.flags |= SSAM_REQUEST_UNSEQUENCED;
|
|
+
|
|
+ rsp.capacity = rqst.response.length;
|
|
+ rsp.length = 0;
|
|
+ rsp.pointer = NULL;
|
|
+
|
|
+ /* Get request payload from user-space. */
|
|
+ if (spec.length) {
|
|
+ if (!plddata) {
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Note: spec.length is limited to U16_MAX bytes via struct
|
|
+ * ssam_cdev_request. This is slightly larger than the
|
|
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
|
|
+ * underlying protocol (note that nothing remotely this size
|
|
+ * should ever be allocated in any normal case). This size is
|
|
+ * validated later in ssam_request_sync(), for allocation the
|
|
+ * bound imposed by u16 should be enough.
|
|
+ */
|
|
+ spec.payload = kzalloc(spec.length, GFP_KERNEL);
|
|
+ if (!spec.payload) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
|
|
+ ret = -EFAULT;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Allocate response buffer. */
|
|
+ if (rsp.capacity) {
|
|
+ if (!rspdata) {
|
|
+ ret = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Note: rsp.capacity is limited to U16_MAX bytes via struct
|
|
+ * ssam_cdev_request. This is slightly larger than the
|
|
+ * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
|
|
+ * underlying protocol (note that nothing remotely this size
|
|
+ * should ever be allocated in any normal case). In later use,
|
|
+ * this capacity does not have to be strictly bounded, as it
|
|
+ * is only used as an output buffer to be written to. For
|
|
+ * allocation the bound imposed by u16 should be enough.
|
|
+ */
|
|
+ rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
|
|
+ if (!rsp.pointer) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Perform request. */
|
|
+ status = ssam_request_sync(client->cdev->ctrl, &spec, &rsp);
|
|
+ if (status)
|
|
+ goto out;
|
|
+
|
|
+ /* Copy response to user-space. */
|
|
+ if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
|
|
+ ret = -EFAULT;
|
|
+
|
|
+out:
|
|
+ /* Always try to set response-length and status. */
|
|
+ tmp = put_user(rsp.length, &r->response.length);
|
|
+ if (tmp)
|
|
+ ret = tmp;
|
|
+
|
|
+ tmp = put_user(status, &r->status);
|
|
+ if (tmp)
|
|
+ ret = tmp;
|
|
+
|
|
+ /* Cleanup. */
|
|
+ kfree(spec.payload);
|
|
+ kfree(rsp.pointer);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
|
|
+ const struct ssam_cdev_notifier_desc __user *d)
|
|
+{
|
|
+ struct ssam_cdev_notifier_desc desc;
|
|
+ long ret;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ ret = copy_from_user(&desc, d, sizeof(*d));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
|
|
+}
|
|
+
|
|
+static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
|
|
+ const struct ssam_cdev_notifier_desc __user *d)
|
|
+{
|
|
+ struct ssam_cdev_notifier_desc desc;
|
|
+ long ret;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ ret = copy_from_user(&desc, d, sizeof(*d));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return ssam_cdev_notifier_unregister(client, desc.target_category);
|
|
+}
|
|
+
|
|
+static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
|
|
+ const struct ssam_cdev_event_desc __user *d)
|
|
+{
|
|
+ struct ssam_cdev_event_desc desc;
|
|
+ struct ssam_event_registry reg;
|
|
+ struct ssam_event_id id;
|
|
+ long ret;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ /* Read descriptor from user-space. */
|
|
+ ret = copy_from_user(&desc, d, sizeof(*d));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Translate descriptor. */
|
|
+ reg.target_category = desc.reg.target_category;
|
|
+ reg.target_id = desc.reg.target_id;
|
|
+ reg.cid_enable = desc.reg.cid_enable;
|
|
+ reg.cid_disable = desc.reg.cid_disable;
|
|
+
|
|
+ id.target_category = desc.id.target_category;
|
|
+ id.instance = desc.id.instance;
|
|
+
|
|
+ /* Disable event. */
|
|
+ return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
|
|
+}
|
|
+
|
|
+static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
|
|
+ const struct ssam_cdev_event_desc __user *d)
|
|
+{
|
|
+ struct ssam_cdev_event_desc desc;
|
|
+ struct ssam_event_registry reg;
|
|
+ struct ssam_event_id id;
|
|
+ long ret;
|
|
+
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ /* Read descriptor from user-space. */
|
|
+ ret = copy_from_user(&desc, d, sizeof(*d));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Translate descriptor. */
|
|
+ reg.target_category = desc.reg.target_category;
|
|
+ reg.target_id = desc.reg.target_id;
|
|
+ reg.cid_enable = desc.reg.cid_enable;
|
|
+ reg.cid_disable = desc.reg.cid_disable;
|
|
+
|
|
+ id.target_category = desc.id.target_category;
|
|
+ id.instance = desc.id.instance;
|
|
+
|
|
+ /* Disable event. */
|
|
+ return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- File operations. ------------------------------------------------------ */
|
|
+
|
|
+static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ struct miscdevice *mdev = filp->private_data;
|
|
+ struct ssam_cdev_client *client;
|
|
+ struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
|
|
+
|
|
+ /* Initialize client */
|
|
+ client = vzalloc(sizeof(*client));
|
|
+ if (!client)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ client->cdev = ssam_cdev_get(cdev);
|
|
+
|
|
+ INIT_LIST_HEAD(&client->node);
|
|
+
|
|
+ mutex_init(&client->notifier_lock);
|
|
+
|
|
+ mutex_init(&client->read_lock);
|
|
+ mutex_init(&client->write_lock);
|
|
+ INIT_KFIFO(client->buffer);
|
|
+ init_waitqueue_head(&client->waitq);
|
|
+
|
|
+ filp->private_data = client;
|
|
+
|
|
+ /* Attach client. */
|
|
+ down_write(&cdev->client_lock);
|
|
+
|
|
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
|
|
+ up_write(&cdev->client_lock);
|
|
+ mutex_destroy(&client->write_lock);
|
|
+ mutex_destroy(&client->read_lock);
|
|
+ mutex_destroy(&client->notifier_lock);
|
|
+ ssam_cdev_put(client->cdev);
|
|
+ vfree(client);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ list_add_tail(&client->node, &cdev->client_list);
|
|
+
|
|
+ up_write(&cdev->client_lock);
|
|
+
|
|
+ stream_open(inode, filp);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
|
|
+{
|
|
+ struct ssam_cdev_client *client = filp->private_data;
|
|
+
|
|
+ /* Force-unregister all remaining notifiers of this client. */
|
|
+ ssam_cdev_notifier_unregister_all(client);
|
|
+
|
|
+ /* Detach client. */
|
|
+ down_write(&client->cdev->client_lock);
|
|
+ list_del(&client->node);
|
|
+ up_write(&client->cdev->client_lock);
|
|
+
|
|
+ /* Free client. */
|
|
+ mutex_destroy(&client->write_lock);
|
|
+ mutex_destroy(&client->read_lock);
|
|
+
|
|
+ mutex_destroy(&client->notifier_lock);
|
|
+
|
|
+ ssam_cdev_put(client->cdev);
|
|
+ vfree(client);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
|
|
+ unsigned long arg)
|
|
+{
|
|
+ lockdep_assert_held_read(&client->cdev->lock);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case SSAM_CDEV_REQUEST:
|
|
+ return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);
|
|
+
|
|
+ case SSAM_CDEV_NOTIF_REGISTER:
|
|
+ return ssam_cdev_notif_register(client,
|
|
+ (struct ssam_cdev_notifier_desc __user *)arg);
|
|
+
|
|
+ case SSAM_CDEV_NOTIF_UNREGISTER:
|
|
+ return ssam_cdev_notif_unregister(client,
|
|
+ (struct ssam_cdev_notifier_desc __user *)arg);
|
|
+
|
|
+ case SSAM_CDEV_EVENT_ENABLE:
|
|
+ return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);
|
|
+
|
|
+ case SSAM_CDEV_EVENT_DISABLE:
|
|
+ return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);
|
|
+
|
|
+ default:
|
|
+ return -ENOTTY;
|
|
+ }
|
|
+}
|
|
+
|
|
+static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ struct ssam_cdev_client *client = file->private_data;
|
|
+ long status;
|
|
+
|
|
+ /* Ensure that controller is valid for as long as we need it. */
|
|
+ if (down_read_killable(&client->cdev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
|
|
+ up_read(&client->cdev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ status = __ssam_cdev_device_ioctl(client, cmd, arg);
|
|
+
|
|
+ up_read(&client->cdev->lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
|
|
+{
|
|
+ struct ssam_cdev_client *client = file->private_data;
|
|
+ struct ssam_cdev *cdev = client->cdev;
|
|
+ unsigned int copied;
|
|
+ int status = 0;
|
|
+
|
|
+ if (down_read_killable(&cdev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ /* Make sure we're not shut down. */
|
|
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
|
|
+ up_read(&cdev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ do {
|
|
+ /* Check availability, wait if necessary. */
|
|
+ if (kfifo_is_empty(&client->buffer)) {
|
|
+ up_read(&cdev->lock);
|
|
+
|
|
+ if (file->f_flags & O_NONBLOCK)
|
|
+ return -EAGAIN;
|
|
+
|
|
+ status = wait_event_interruptible(client->waitq,
|
|
+ !kfifo_is_empty(&client->buffer) ||
|
|
+ test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
|
|
+ &cdev->flags));
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ if (down_read_killable(&cdev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ /* Need to check that we're not shut down again. */
|
|
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
|
|
+ up_read(&cdev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Try to read from FIFO. */
|
|
+ if (mutex_lock_interruptible(&client->read_lock)) {
|
|
+ up_read(&cdev->lock);
|
|
+ return -ERESTARTSYS;
|
|
+ }
|
|
+
|
|
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
|
|
+ mutex_unlock(&client->read_lock);
|
|
+
|
|
+ if (status < 0) {
|
|
+ up_read(&cdev->lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* We might not have gotten anything, check this here. */
|
|
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
|
|
+ up_read(&cdev->lock);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ } while (copied == 0);
|
|
+
|
|
+ up_read(&cdev->lock);
|
|
+ return copied;
|
|
+}
|
|
+
|
|
+static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
|
|
+{
|
|
+ struct ssam_cdev_client *client = file->private_data;
|
|
+ __poll_t events = 0;
|
|
+
|
|
+ if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
|
|
+ return EPOLLHUP | EPOLLERR;
|
|
+
|
|
+ poll_wait(file, &client->waitq, pt);
|
|
+
|
|
+ if (!kfifo_is_empty(&client->buffer))
|
|
+ events |= EPOLLIN | EPOLLRDNORM;
|
|
+
|
|
+ return events;
|
|
+}
|
|
+
|
|
+static int ssam_cdev_fasync(int fd, struct file *file, int on)
|
|
+{
|
|
+ struct ssam_cdev_client *client = file->private_data;
|
|
+
|
|
+ return fasync_helper(fd, file, on, &client->fasync);
|
|
+}
|
|
+
|
|
+static const struct file_operations ssam_controller_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = ssam_cdev_device_open,
|
|
+ .release = ssam_cdev_device_release,
|
|
+ .read = ssam_cdev_read,
|
|
+ .poll = ssam_cdev_poll,
|
|
+ .fasync = ssam_cdev_fasync,
|
|
+ .unlocked_ioctl = ssam_cdev_device_ioctl,
|
|
+ .compat_ioctl = ssam_cdev_device_ioctl,
|
|
+ .llseek = no_llseek,
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Device and driver setup ----------------------------------------------- */
|
|
+
|
|
+static int ssam_dbg_device_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct ssam_cdev *cdev;
|
|
+ int status;
|
|
+
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
|
|
+ if (!cdev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ kref_init(&cdev->kref);
|
|
+ init_rwsem(&cdev->lock);
|
|
+ cdev->ctrl = ctrl;
|
|
+ cdev->dev = &pdev->dev;
|
|
+
|
|
+ cdev->mdev.parent = &pdev->dev;
|
|
+ cdev->mdev.minor = MISC_DYNAMIC_MINOR;
|
|
+ cdev->mdev.name = "surface_aggregator";
|
|
+ cdev->mdev.nodename = "surface/aggregator";
|
|
+ cdev->mdev.fops = &ssam_controller_fops;
|
|
+
|
|
+ init_rwsem(&cdev->client_lock);
|
|
+ INIT_LIST_HEAD(&cdev->client_list);
|
|
+
|
|
+ status = misc_register(&cdev->mdev);
|
|
+ if (status) {
|
|
+ kfree(cdev);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ platform_set_drvdata(pdev, cdev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int ssam_dbg_device_remove(struct platform_device *pdev)
|
|
+{
|
|
+ struct ssam_cdev *cdev = platform_get_drvdata(pdev);
|
|
+ struct ssam_cdev_client *client;
|
|
+
|
|
+ /*
|
|
+ * Mark device as shut-down. Prevent new clients from being added and
|
|
+ * new operations from being executed.
|
|
+ */
|
|
+ set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);
|
|
+
|
|
+ down_write(&cdev->client_lock);
|
|
+
|
|
+ /* Remove all notifiers registered by us. */
|
|
+ list_for_each_entry(client, &cdev->client_list, node) {
|
|
+ ssam_cdev_notifier_unregister_all(client);
|
|
+ }
|
|
+
|
|
+ /* Wake up async clients. */
|
|
+ list_for_each_entry(client, &cdev->client_list, node) {
|
|
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
|
|
+ }
|
|
+
|
|
+ /* Wake up blocking clients. */
|
|
+ list_for_each_entry(client, &cdev->client_list, node) {
|
|
+ wake_up_interruptible(&client->waitq);
|
|
+ }
|
|
+
|
|
+ up_write(&cdev->client_lock);
|
|
+
|
|
+ /*
|
|
+ * The controller is only guaranteed to be valid for as long as the
|
|
+ * driver is bound. Remove controller so that any lingering open files
|
|
+ * cannot access it any more after we're gone.
|
|
+ */
|
|
+ down_write(&cdev->lock);
|
|
+ cdev->ctrl = NULL;
|
|
+ cdev->dev = NULL;
|
|
+ up_write(&cdev->lock);
|
|
+
|
|
+ misc_deregister(&cdev->mdev);
|
|
+
|
|
+ ssam_cdev_put(cdev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_device *ssam_cdev_device;
|
|
+
|
|
+static struct platform_driver ssam_cdev_driver = {
|
|
+ .probe = ssam_dbg_device_probe,
|
|
+ .remove = ssam_dbg_device_remove,
|
|
+ .driver = {
|
|
+ .name = SSAM_CDEV_DEVICE_NAME,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init ssam_debug_init(void)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
|
|
+ PLATFORM_DEVID_NONE);
|
|
+ if (!ssam_cdev_device)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ status = platform_device_add(ssam_cdev_device);
|
|
+ if (status)
|
|
+ goto err_device;
|
|
+
|
|
+ status = platform_driver_register(&ssam_cdev_driver);
|
|
+ if (status)
|
|
+ goto err_driver;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_driver:
|
|
+ platform_device_del(ssam_cdev_device);
|
|
+err_device:
|
|
+ platform_device_put(ssam_cdev_device);
|
|
+ return status;
|
|
+}
|
|
+module_init(ssam_debug_init);
|
|
+
|
|
+static void __exit ssam_debug_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&ssam_cdev_driver);
|
|
+ platform_device_unregister(ssam_cdev_device);
|
|
+}
|
|
+module_exit(ssam_debug_exit);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/surface_aggregator_registry.c b/drivers/platform/x86/surface_aggregator_registry.c
|
|
new file mode 100644
|
|
index 000000000000..1b87bdd6dd1e
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_aggregator_registry.c
|
|
@@ -0,0 +1,618 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) client device registry.
|
|
+ *
|
|
+ * Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
|
|
+ * cannot be auto-detected. Provides device-hubs for these devices.
|
|
+ *
|
|
+ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/acpi.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/notifier.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+
|
|
+/* -- Device registry structures. ------------------------------------------- */
|
|
+
|
|
+struct ssam_hub_cell {
|
|
+ struct ssam_device_uid uid;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct ssam_hub_desc {
|
|
+ const struct ssam_hub_cell *cells;
|
|
+ unsigned int num_cells;
|
|
+};
|
|
+
|
|
+
|
|
+#define SSAM_DUID(cat, tid, iid, fun) \
|
|
+ ((struct ssam_device_uid) { \
|
|
+ .domain = SSAM_DOMAIN_SERIALHUB, \
|
|
+ .category = SSAM_SSH_TC_##cat, \
|
|
+ .target = (tid), \
|
|
+ .instance = (iid), \
|
|
+ .function = (fun) \
|
|
+ })
|
|
+
|
|
+#define SSAM_VDUID(cat, tid, iid, fun) \
|
|
+ ((struct ssam_device_uid) { \
|
|
+ .domain = SSAM_DOMAIN_VIRTUAL, \
|
|
+ .category = SSAM_VIRTUAL_TC_##cat, \
|
|
+ .target = (tid), \
|
|
+ .instance = (iid), \
|
|
+ .function = (fun) \
|
|
+ })
|
|
+
|
|
+#define SSAM_DUID_HUB_MAIN SSAM_VDUID(HUB, 0x01, 0x00, 0x00)
|
|
+#define SSAM_DUID_HUB_BASE SSAM_VDUID(HUB, 0x02, 0x00, 0x00)
|
|
+
|
|
+#define SSAM_DEFINE_HUB_DESC(__name, __cells) \
|
|
+ struct ssam_hub_desc __name = { \
|
|
+ .cells = __cells, \
|
|
+ .num_cells = ARRAY_SIZE(__cells), \
|
|
+ };
|
|
+
|
|
+#define SSAM_DEFINE_PLATFORM_HUB(__suffix) \
|
|
+ static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix, \
|
|
+ ssam_devices_##__suffix); \
|
|
+ static const struct ssam_hub_cell ssam_platform_hubs_##__suffix[] = { \
|
|
+ { SSAM_DUID_HUB_MAIN, (void *)&ssam_device_hub_##__suffix }, \
|
|
+ }; \
|
|
+ static const SSAM_DEFINE_HUB_DESC(ssam_platform_hub_##__suffix, \
|
|
+ ssam_platform_hubs_##__suffix); \
|
|
+
|
|
+#define SSAM_DEFINE_PLATFORM_HUB_WITH_BASE(__suffix) \
|
|
+ static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix, \
|
|
+ ssam_devices_##__suffix); \
|
|
+ static const SSAM_DEFINE_HUB_DESC(ssam_device_hub_##__suffix##_base, \
|
|
+ ssam_devices_##__suffix##_base); \
|
|
+ static const struct ssam_hub_cell ssam_platform_hubs_##__suffix[] = { \
|
|
+ { SSAM_DUID_HUB_MAIN, (void *)&ssam_device_hub_##__suffix }, \
|
|
+ { SSAM_DUID_HUB_BASE, (void *)&ssam_device_hub_##__suffix##_base },\
|
|
+ }; \
|
|
+ static const SSAM_DEFINE_HUB_DESC(ssam_platform_hub_##__suffix, \
|
|
+ ssam_platform_hubs_##__suffix); \
|
|
+
|
|
+
|
|
+/* -- Device registry. ------------------------------------------------------ */
|
|
+
|
|
+#define SSAM_DUID_BAT_AC SSAM_DUID(BAT, 0x01, 0x01, 0x01)
|
|
+#define SSAM_DUID_BAT_MAIN SSAM_DUID(BAT, 0x01, 0x01, 0x00)
|
|
+#define SSAM_DUID_BAT_SB3BASE SSAM_DUID(BAT, 0x02, 0x01, 0x00)
|
|
+
|
|
+#define SSAM_DUID_TMP_PERF SSAM_DUID(TMP, 0x01, 0x00, 0x01)
|
|
+
|
|
+#define SSAM_DUID_BAS_DTX SSAM_DUID(BAS, 0x01, 0x00, 0x00)
|
|
+
|
|
+#define SSAM_DUID_HID_KEYBOARD SSAM_DUID(HID, 0x02, 0x01, 0x00)
|
|
+#define SSAM_DUID_HID_TOUCHPAD SSAM_DUID(HID, 0x02, 0x03, 0x00)
|
|
+#define SSAM_DUID_HID_IID5 SSAM_DUID(HID, 0x02, 0x05, 0x00)
|
|
+#define SSAM_DUID_HID_IID6 SSAM_DUID(HID, 0x02, 0x06, 0x00)
|
|
+
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sb2[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sb3[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+ { SSAM_DUID_BAT_AC },
|
|
+ { SSAM_DUID_BAT_MAIN },
|
|
+ { SSAM_DUID_BAS_DTX },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sb3_base[] = {
|
|
+ { SSAM_DUID_BAT_SB3BASE },
|
|
+ { SSAM_DUID_HID_KEYBOARD },
|
|
+ { SSAM_DUID_HID_TOUCHPAD },
|
|
+ { SSAM_DUID_HID_IID5 },
|
|
+ { SSAM_DUID_HID_IID6 },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sl1[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sl2[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sl3[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+ { SSAM_DUID_BAT_AC },
|
|
+ { SSAM_DUID_BAT_MAIN },
|
|
+ { SSAM_DUID_HID_KEYBOARD },
|
|
+ { SSAM_DUID_HID_TOUCHPAD },
|
|
+ { SSAM_DUID_HID_IID5 },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_slg1[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+ { SSAM_DUID_BAT_AC },
|
|
+ { SSAM_DUID_BAT_MAIN },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sp5[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sp6[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+};
|
|
+
|
|
+static const struct ssam_hub_cell ssam_devices_sp7[] = {
|
|
+ { SSAM_DUID_TMP_PERF },
|
|
+ { SSAM_DUID_BAT_AC },
|
|
+ { SSAM_DUID_BAT_MAIN },
|
|
+};
|
|
+
|
|
+SSAM_DEFINE_PLATFORM_HUB(sb2);
|
|
+SSAM_DEFINE_PLATFORM_HUB_WITH_BASE(sb3);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sl1);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sl2);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sl3);
|
|
+SSAM_DEFINE_PLATFORM_HUB(slg1);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sp5);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sp6);
|
|
+SSAM_DEFINE_PLATFORM_HUB(sp7);
|
|
+
|
|
+
|
|
+/* -- Device registry helper functions. ------------------------------------- */
|
|
+
|
|
+static int ssam_hub_remove_devices_fn(struct device *dev, void *data)
|
|
+{
|
|
+ if (!is_ssam_device(dev))
|
|
+ return 0;
|
|
+
|
|
+ ssam_device_remove(to_ssam_device(dev));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssam_hub_remove_devices(struct device *parent)
|
|
+{
|
|
+ device_for_each_child_reverse(parent, NULL, ssam_hub_remove_devices_fn);
|
|
+}
|
|
+
|
|
+static int ssam_hub_add_device(struct device *parent,
|
|
+ struct ssam_controller *ctrl,
|
|
+ const struct ssam_hub_cell *cell)
|
|
+{
|
|
+ struct ssam_device *sdev;
|
|
+ int status;
|
|
+
|
|
+ sdev = ssam_device_alloc(ctrl, cell->uid);
|
|
+ if (!sdev)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ sdev->dev.parent = parent;
|
|
+ sdev->dev.platform_data = cell->data;
|
|
+
|
|
+ status = ssam_device_add(sdev);
|
|
+ if (status)
|
|
+ ssam_device_put(sdev);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int ssam_hub_add_devices(struct device *parent,
|
|
+ struct ssam_controller *ctrl,
|
|
+ const struct ssam_hub_desc *desc)
|
|
+{
|
|
+ int status, i;
|
|
+
|
|
+ for (i = 0; i < desc->num_cells; i++) {
|
|
+ status = ssam_hub_add_device(parent, ctrl, &desc->cells[i]);
|
|
+ if (status)
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+err:
|
|
+ ssam_hub_remove_devices(parent);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- SSAM main-hub driver. ------------------------------------------------- */
|
|
+
|
|
+static int ssam_hub_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ const struct ssam_hub_desc *desc = dev_get_platdata(&sdev->dev);
|
|
+
|
|
+ if (!desc)
|
|
+ return -ENODEV;
|
|
+
|
|
+ return ssam_hub_add_devices(&sdev->dev, sdev->ctrl, desc);
|
|
+}
|
|
+
|
|
+static void ssam_hub_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ ssam_hub_remove_devices(&sdev->dev);
|
|
+}
|
|
+
|
|
+static const struct ssam_device_id ssam_hub_match[] = {
|
|
+ { SSAM_VDEV(HUB, 0x01, 0x00, 0x00) },
|
|
+ { },
|
|
+};
|
|
+
|
|
+static struct ssam_device_driver ssam_hub_driver = {
|
|
+ .probe = ssam_hub_probe,
|
|
+ .remove = ssam_hub_remove,
|
|
+ .match_table = ssam_hub_match,
|
|
+ .driver = {
|
|
+ .name = "surface_aggregator_device_hub",
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/* -- SSAM base-hub driver. ------------------------------------------------- */
|
|
+
|
|
+enum ssam_base_hub_state {
|
|
+ SSAM_BASE_HUB_UNINITIALIZED,
|
|
+ SSAM_BASE_HUB_CONNECTED,
|
|
+ SSAM_BASE_HUB_DISCONNECTED,
|
|
+};
|
|
+
|
|
+struct ssam_base_hub {
|
|
+ struct ssam_device *sdev;
|
|
+ const struct ssam_hub_desc *devices;
|
|
+
|
|
+ struct mutex lock;
|
|
+ enum ssam_base_hub_state state;
|
|
+
|
|
+ struct ssam_event_notifier notif;
|
|
+};
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x0d,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+#define SSAM_BAS_OPMODE_TABLET 0x00
|
|
+#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
|
|
+
|
|
+static int ssam_base_hub_query_state(struct ssam_device *sdev,
|
|
+ enum ssam_base_hub_state *state)
|
|
+{
|
|
+ u8 opmode;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_retry(ssam_bas_query_opmode, sdev->ctrl, &opmode);
|
|
+ if (status < 0) {
|
|
+ dev_err(&sdev->dev, "failed to query base state: %d\n", status);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ if (opmode != SSAM_BAS_OPMODE_TABLET)
|
|
+ *state = SSAM_BASE_HUB_CONNECTED;
|
|
+ else
|
|
+ *state = SSAM_BASE_HUB_DISCONNECTED;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t ssam_base_hub_state_show(struct device *dev,
|
|
+ struct device_attribute *attr,
|
|
+ char *buf)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+ struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
|
|
+ bool connected;
|
|
+
|
|
+ mutex_lock(&hub->lock);
|
|
+ connected = hub->state == SSAM_BASE_HUB_CONNECTED;
|
|
+ mutex_unlock(&hub->lock);
|
|
+
|
|
+ return scnprintf(buf, PAGE_SIZE, "%d\n", connected);
|
|
+}
|
|
+
|
|
+static struct device_attribute ssam_base_hub_attr_state =
|
|
+ __ATTR(state, S_IRUGO, ssam_base_hub_state_show, NULL);
|
|
+
|
|
+static struct attribute *ssam_base_hub_attrs[] = {
|
|
+ &ssam_base_hub_attr_state.attr,
|
|
+ NULL,
|
|
+};
|
|
+
|
|
+static const struct attribute_group ssam_base_hub_group = {
|
|
+ .attrs = ssam_base_hub_attrs,
|
|
+};
|
|
+
|
|
+static int ssam_base_hub_update(struct ssam_device *sdev,
|
|
+ enum ssam_base_hub_state new)
|
|
+{
|
|
+ struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
|
|
+ int status = 0;
|
|
+
|
|
+ mutex_lock(&hub->lock);
|
|
+ if (hub->state == new) {
|
|
+ mutex_unlock(&hub->lock);
|
|
+ return 0;
|
|
+ }
|
|
+ hub->state = new;
|
|
+
|
|
+ if (hub->state == SSAM_BASE_HUB_CONNECTED)
|
|
+ status = ssam_hub_add_devices(&sdev->dev, sdev->ctrl, hub->devices);
|
|
+
|
|
+ if (hub->state != SSAM_BASE_HUB_CONNECTED || status)
|
|
+ ssam_hub_remove_devices(&sdev->dev);
|
|
+
|
|
+ mutex_unlock(&hub->lock);
|
|
+
|
|
+ if (status) {
|
|
+ dev_err(&sdev->dev, "failed to update base-hub devices: %d\n",
|
|
+ status);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf,
|
|
+ const struct ssam_event *event)
|
|
+{
|
|
+ struct ssam_base_hub *hub;
|
|
+ struct ssam_device *sdev;
|
|
+ enum ssam_base_hub_state new;
|
|
+
|
|
+ hub = container_of(nf, struct ssam_base_hub, notif);
|
|
+ sdev = hub->sdev;
|
|
+
|
|
+ if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
|
|
+ return 0;
|
|
+
|
|
+ if (event->length < 1) {
|
|
+ dev_err(&sdev->dev, "unexpected payload size: %u\n",
|
|
+ event->length);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (event->data[0])
|
|
+ new = SSAM_BASE_HUB_CONNECTED;
|
|
+ else
|
|
+ new = SSAM_BASE_HUB_DISCONNECTED;
|
|
+
|
|
+ ssam_base_hub_update(sdev, new);
|
|
+
|
|
+ /*
|
|
+ * Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
|
|
+ * consumed by the detachment system driver. We're just a (more or less)
|
|
+ * silent observer.
|
|
+ */
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int __maybe_unused ssam_base_hub_resume(struct device *dev)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+ enum ssam_base_hub_state state;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_base_hub_query_state(sdev, &state);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ return ssam_base_hub_update(sdev, state);
|
|
+}
|
|
+static SIMPLE_DEV_PM_OPS(ssam_base_hub_pm_ops, NULL, ssam_base_hub_resume);
|
|
+
|
|
+static int ssam_base_hub_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ const struct ssam_hub_desc *desc = dev_get_platdata(&sdev->dev);
|
|
+ const struct ssam_device_id *match;
|
|
+ enum ssam_base_hub_state state;
|
|
+ struct ssam_base_hub *hub;
|
|
+ int status;
|
|
+
|
|
+ if (!desc)
|
|
+ return -ENODEV;
|
|
+
|
|
+ match = ssam_device_get_match(sdev);
|
|
+ if (!match)
|
|
+ return -ENODEV;
|
|
+
|
|
+ hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
|
|
+ if (!hub)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ mutex_init(&hub->lock);
|
|
+
|
|
+ hub->sdev = sdev;
|
|
+ hub->devices = desc;
|
|
+ hub->state = SSAM_BASE_HUB_UNINITIALIZED;
|
|
+
|
|
+ hub->notif.base.priority = 1000; /* This notifier should run first. */
|
|
+ hub->notif.base.fn = ssam_base_hub_notif;
|
|
+ hub->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
|
|
+ hub->notif.event.id.target_category = SSAM_SSH_TC_BAS,
|
|
+ hub->notif.event.id.instance = 0,
|
|
+ hub->notif.event.mask = SSAM_EVENT_MASK_NONE;
|
|
+ hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ status = ssam_notifier_register(sdev->ctrl, &hub->notif);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ ssam_device_set_drvdata(sdev, hub);
|
|
+
|
|
+ status = ssam_base_hub_query_state(sdev, &state);
|
|
+ if (status) {
|
|
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ status = ssam_base_hub_update(sdev, state);
|
|
+ if (status) {
|
|
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ status = sysfs_create_group(&sdev->dev.kobj, &ssam_base_hub_group);
|
|
+ if (status) {
|
|
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
|
|
+ ssam_hub_remove_devices(&sdev->dev);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void ssam_base_hub_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ struct ssam_base_hub *hub = ssam_device_get_drvdata(sdev);
|
|
+
|
|
+ sysfs_remove_group(&sdev->dev.kobj, &ssam_base_hub_group);
|
|
+
|
|
+ ssam_notifier_unregister(sdev->ctrl, &hub->notif);
|
|
+ ssam_hub_remove_devices(&sdev->dev);
|
|
+
|
|
+ mutex_destroy(&hub->lock);
|
|
+}
|
|
+
|
|
+static const struct ssam_device_id ssam_base_hub_match[] = {
|
|
+ { SSAM_VDEV(HUB, 0x02, 0x00, 0x00) },
|
|
+ { },
|
|
+};
|
|
+
|
|
+static struct ssam_device_driver ssam_base_hub_driver = {
|
|
+ .probe = ssam_base_hub_probe,
|
|
+ .remove = ssam_base_hub_remove,
|
|
+ .match_table = ssam_base_hub_match,
|
|
+ .driver = {
|
|
+ .name = "surface_aggregator_base_hub",
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ .pm = &ssam_base_hub_pm_ops,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
|
|
+
|
|
+static const struct acpi_device_id ssam_platform_hub_match[] = {
|
|
+ /* Surface Pro 4, 5, and 6 */
|
|
+ { "MSHW0081", (unsigned long)&ssam_platform_hub_sp5 },
|
|
+
|
|
+ /* Surface Pro 6 (OMBR >= 0x10) */
|
|
+ { "MSHW0111", (unsigned long)&ssam_platform_hub_sp6 },
|
|
+
|
|
+ /* Surface Pro 7 */
|
|
+ { "MSHW0116", (unsigned long)&ssam_platform_hub_sp7 },
|
|
+
|
|
+ /* Surface Pro 7+ */
|
|
+ { "MSHW0119", (unsigned long)&ssam_platform_hub_sp7 },
|
|
+
|
|
+ /* Surface Book 2 */
|
|
+ { "MSHW0107", (unsigned long)&ssam_platform_hub_sb2 },
|
|
+
|
|
+ /* Surface Book 3 */
|
|
+ { "MSHW0117", (unsigned long)&ssam_platform_hub_sb3 },
|
|
+
|
|
+ /* Surface Laptop 1 */
|
|
+ { "MSHW0086", (unsigned long)&ssam_platform_hub_sl1 },
|
|
+
|
|
+ /* Surface Laptop 2 */
|
|
+ { "MSHW0112", (unsigned long)&ssam_platform_hub_sl2 },
|
|
+
|
|
+ /* Surface Laptop 3 (13", Intel) */
|
|
+ { "MSHW0114", (unsigned long)&ssam_platform_hub_sl3 },
|
|
+
|
|
+ /* Surface Laptop 3 (15", AMD) */
|
|
+ { "MSHW0110", (unsigned long)&ssam_platform_hub_sl3 },
|
|
+
|
|
+ /* Surface Laptop Go 1 */
|
|
+ { "MSHW0118", (unsigned long)&ssam_platform_hub_slg1 },
|
|
+
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
|
|
+
|
|
+static int ssam_platform_hub_probe(struct platform_device *pdev)
|
|
+{
|
|
+ const struct ssam_hub_desc *desc;
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ desc = acpi_device_get_match_data(&pdev->dev);
|
|
+ if (!desc)
|
|
+ return -ENODEV;
|
|
+
|
|
+ /*
|
|
+ * As we're adding the SSAM client devices as children under this device
|
|
+ * and not the SSAM controller, we need to add a device link to the
|
|
+ * controller to ensure that we remove all of our devices before the
|
|
+ * controller is removed. This also guarantees proper ordering for
|
|
+ * suspend/resume of the devices on this hub.
|
|
+ */
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ return ssam_hub_add_devices(&pdev->dev, ctrl, desc);
|
|
+}
|
|
+
|
|
+static int ssam_platform_hub_remove(struct platform_device *pdev)
|
|
+{
|
|
+ ssam_hub_remove_devices(&pdev->dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct platform_driver ssam_platform_hub_driver = {
|
|
+ .probe = ssam_platform_hub_probe,
|
|
+ .remove = ssam_platform_hub_remove,
|
|
+ .driver = {
|
|
+ .name = "surface_aggregator_platform_hub",
|
|
+ .acpi_match_table = ssam_platform_hub_match,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Module initialization. ------------------------------------------------ */
|
|
+
|
|
+static int __init ssam_device_hub_init(void)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = platform_driver_register(&ssam_platform_hub_driver);
|
|
+ if (status)
|
|
+ goto err_platform;
|
|
+
|
|
+ status = ssam_device_driver_register(&ssam_hub_driver);
|
|
+ if (status)
|
|
+ goto err_main;
|
|
+
|
|
+ status = ssam_device_driver_register(&ssam_base_hub_driver);
|
|
+ if (status)
|
|
+ goto err_base;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+err_base:
|
|
+ ssam_device_driver_unregister(&ssam_hub_driver);
|
|
+err_main:
|
|
+ platform_driver_unregister(&ssam_platform_hub_driver);
|
|
+err_platform:
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static void __exit ssam_device_hub_exit(void)
|
|
+{
|
|
+ ssam_device_driver_unregister(&ssam_base_hub_driver);
|
|
+ ssam_device_driver_unregister(&ssam_hub_driver);
|
|
+ platform_driver_unregister(&ssam_platform_hub_driver);
|
|
+}
|
|
+
|
|
+module_init(ssam_device_hub_init);
|
|
+module_exit(ssam_device_hub_exit);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/surface_dtx.c b/drivers/platform/x86/surface_dtx.c
|
|
new file mode 100644
|
|
index 000000000000..bbbdffc5bf8f
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_dtx.c
|
|
@@ -0,0 +1,1281 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface Book (gen. 2 and later) detachment system (DTX) driver.
|
|
+ *
|
|
+ * Provides a user-space interface to properly handle clipboard/tablet
|
|
+ * (containing screen and processor) detachment from the base of the device
|
|
+ * (containing the keyboard and optionally a discrete GPU). Allows to
|
|
+ * acknowledge (to speed things up), abort (e.g. in case the dGPU is still in
|
|
+ * use), or request detachment via user-space.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <linux/fs.h>
|
|
+#include <linux/input.h>
|
|
+#include <linux/ioctl.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/kfifo.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/miscdevice.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/poll.h>
|
|
+#include <linux/rwsem.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+#include <linux/surface_aggregator/dtx.h>
|
|
+
|
|
+
|
|
+/* -- SSAM interface. ------------------------------------------------------- */
|
|
+
|
|
+enum sam_event_cid_bas {
|
|
+ SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
|
|
+ SAM_EVENT_CID_DTX_REQUEST = 0x0e,
|
|
+ SAM_EVENT_CID_DTX_CANCEL = 0x0f,
|
|
+ SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
|
|
+};
|
|
+
|
|
+enum ssam_bas_base_state {
|
|
+ SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
|
|
+ SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
|
|
+ SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
|
|
+};
|
|
+
|
|
+enum ssam_bas_latch_status {
|
|
+ SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
|
|
+ SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
|
|
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
|
|
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
|
|
+ SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
|
|
+};
|
|
+
|
|
+enum ssam_bas_cancel_reason {
|
|
+ SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
|
|
+ SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
|
|
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
|
|
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
|
|
+ SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
|
|
+};
|
|
+
|
|
+struct ssam_bas_base_info {
|
|
+ u8 state;
|
|
+ u8 base_id;
|
|
+} __packed;
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x06,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x07,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x08,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x09,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x0a,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x0b,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x0c,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x0d,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
|
|
+ .target_category = SSAM_SSH_TC_BAS,
|
|
+ .target_id = 0x01,
|
|
+ .command_id = 0x11,
|
|
+ .instance_id = 0x00,
|
|
+});
|
|
+
|
|
+
|
|
+/* -- Main structures. ------------------------------------------------------ */
|
|
+
|
|
+enum sdtx_device_state {
|
|
+ SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
|
|
+ SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
|
|
+ SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
|
|
+ SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
|
|
+};
|
|
+
|
|
+struct sdtx_device {
|
|
+ struct kref kref;
|
|
+ struct rw_semaphore lock; /* Guards device and controller reference. */
|
|
+
|
|
+ struct device *dev;
|
|
+ struct ssam_controller *ctrl;
|
|
+ unsigned long flags;
|
|
+
|
|
+ struct miscdevice mdev;
|
|
+ wait_queue_head_t waitq;
|
|
+ struct mutex write_lock; /* Guards order of events/notifications. */
|
|
+ struct rw_semaphore client_lock; /* Guards client list. */
|
|
+ struct list_head client_list;
|
|
+
|
|
+ struct delayed_work state_work;
|
|
+ struct {
|
|
+ struct ssam_bas_base_info base;
|
|
+ u8 device_mode;
|
|
+ u8 latch_status;
|
|
+ } state;
|
|
+
|
|
+ struct delayed_work mode_work;
|
|
+ struct input_dev *mode_switch;
|
|
+
|
|
+ struct ssam_event_notifier notif;
|
|
+};
|
|
+
|
|
+enum sdtx_client_state {
|
|
+ SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
|
|
+};
|
|
+
|
|
+struct sdtx_client {
|
|
+ struct sdtx_device *ddev;
|
|
+ struct list_head node;
|
|
+ unsigned long flags;
|
|
+
|
|
+ struct fasync_struct *fasync;
|
|
+
|
|
+ struct mutex read_lock; /* Guards FIFO buffer read access. */
|
|
+ DECLARE_KFIFO(buffer, u8, 512);
|
|
+};
|
|
+
|
|
+static void __sdtx_device_release(struct kref *kref)
|
|
+{
|
|
+ struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
|
|
+
|
|
+ mutex_destroy(&ddev->write_lock);
|
|
+ kfree(ddev);
|
|
+}
|
|
+
|
|
+static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
|
|
+{
|
|
+ if (ddev)
|
|
+ kref_get(&ddev->kref);
|
|
+
|
|
+ return ddev;
|
|
+}
|
|
+
|
|
+static void sdtx_device_put(struct sdtx_device *ddev)
|
|
+{
|
|
+ if (ddev)
|
|
+ kref_put(&ddev->kref, __sdtx_device_release);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Firmware value translations. ------------------------------------------ */
|
|
+
|
|
+static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
|
|
+{
|
|
+ switch (state) {
|
|
+ case SSAM_BAS_BASE_STATE_ATTACHED:
|
|
+ return SDTX_BASE_ATTACHED;
|
|
+
|
|
+ case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
|
|
+ return SDTX_BASE_DETACHED;
|
|
+
|
|
+ case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
|
|
+ return SDTX_DETACH_NOT_FEASIBLE;
|
|
+
|
|
+ default:
|
|
+ dev_err(ddev->dev, "unknown base state: %#04x\n", state);
|
|
+ return SDTX_UNKNOWN(state);
|
|
+ }
|
|
+}
|
|
+
|
|
+static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
|
|
+{
|
|
+ switch (status) {
|
|
+ case SSAM_BAS_LATCH_STATUS_CLOSED:
|
|
+ return SDTX_LATCH_CLOSED;
|
|
+
|
|
+ case SSAM_BAS_LATCH_STATUS_OPENED:
|
|
+ return SDTX_LATCH_OPENED;
|
|
+
|
|
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
|
|
+ return SDTX_ERR_FAILED_TO_OPEN;
|
|
+
|
|
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
|
|
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
|
|
+
|
|
+ case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
|
|
+ return SDTX_ERR_FAILED_TO_CLOSE;
|
|
+
|
|
+ default:
|
|
+ dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
|
|
+ return SDTX_UNKNOWN(status);
|
|
+ }
|
|
+}
|
|
+
|
|
+static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
|
|
+{
|
|
+ switch (reason) {
|
|
+ case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
|
|
+ return SDTX_DETACH_NOT_FEASIBLE;
|
|
+
|
|
+ case SSAM_BAS_CANCEL_REASON_TIMEOUT:
|
|
+ return SDTX_DETACH_TIMEDOUT;
|
|
+
|
|
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
|
|
+ return SDTX_ERR_FAILED_TO_OPEN;
|
|
+
|
|
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
|
|
+ return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
|
|
+
|
|
+ case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
|
|
+ return SDTX_ERR_FAILED_TO_CLOSE;
|
|
+
|
|
+ default:
|
|
+ dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
|
|
+ return SDTX_UNKNOWN(reason);
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/* -- IOCTLs. --------------------------------------------------------------- */
|
|
+
|
|
+static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
|
|
+ struct sdtx_base_info __user *buf)
|
|
+{
|
|
+ struct ssam_bas_base_info raw;
|
|
+ struct sdtx_base_info info;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held_read(&ddev->lock);
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ info.state = sdtx_translate_base_state(ddev, raw.state);
|
|
+ info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
|
|
+
|
|
+ if (copy_to_user(buf, &info, sizeof(info)))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
|
|
+{
|
|
+ u8 mode;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held_read(&ddev->lock);
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return put_user(mode, buf);
|
|
+}
|
|
+
|
|
+static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
|
|
+{
|
|
+ u8 latch;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held_read(&ddev->lock);
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return put_user(sdtx_translate_latch_status(ddev, latch), buf);
|
|
+}
|
|
+
|
|
+static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ struct sdtx_device *ddev = client->ddev;
|
|
+
|
|
+ lockdep_assert_held_read(&ddev->lock);
|
|
+
|
|
+ switch (cmd) {
|
|
+ case SDTX_IOCTL_EVENTS_ENABLE:
|
|
+ set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
|
|
+ return 0;
|
|
+
|
|
+ case SDTX_IOCTL_EVENTS_DISABLE:
|
|
+ clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
|
|
+ return 0;
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_LOCK:
|
|
+ return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_UNLOCK:
|
|
+ return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_REQUEST:
|
|
+ return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_CONFIRM:
|
|
+ return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_HEARTBEAT:
|
|
+ return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_LATCH_CANCEL:
|
|
+ return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
|
|
+
|
|
+ case SDTX_IOCTL_GET_BASE_INFO:
|
|
+ return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
|
|
+
|
|
+ case SDTX_IOCTL_GET_DEVICE_MODE:
|
|
+ return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
|
|
+
|
|
+ case SDTX_IOCTL_GET_LATCH_STATUS:
|
|
+ return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
|
|
+
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+}
|
|
+
|
|
+static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
+{
|
|
+ struct sdtx_client *client = file->private_data;
|
|
+ long status;
|
|
+
|
|
+ if (down_read_killable(&client->ddev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
|
|
+ up_read(&client->ddev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ status = __surface_dtx_ioctl(client, cmd, arg);
|
|
+
|
|
+ up_read(&client->ddev->lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- File operations. ------------------------------------------------------ */
|
|
+
|
|
+static int surface_dtx_open(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
|
|
+ struct sdtx_client *client;
|
|
+
|
|
+ /* Initialize client. */
|
|
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
|
|
+ if (!client)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ client->ddev = sdtx_device_get(ddev);
|
|
+
|
|
+ INIT_LIST_HEAD(&client->node);
|
|
+
|
|
+ mutex_init(&client->read_lock);
|
|
+ INIT_KFIFO(client->buffer);
|
|
+
|
|
+ file->private_data = client;
|
|
+
|
|
+ /* Attach client. */
|
|
+ down_write(&ddev->client_lock);
|
|
+
|
|
+ /*
|
|
+ * Do not add a new client if the device has been shut down. Note that
|
|
+ * it's enough to hold the client_lock here as, during shutdown, we
|
|
+ * only acquire that lock and remove clients after marking the device
|
|
+ * as shut down.
|
|
+ */
|
|
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
|
|
+ up_write(&ddev->client_lock);
|
|
+ sdtx_device_put(client->ddev);
|
|
+ kfree(client);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ list_add_tail(&client->node, &ddev->client_list);
|
|
+ up_write(&ddev->client_lock);
|
|
+
|
|
+ stream_open(inode, file);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_dtx_release(struct inode *inode, struct file *file)
|
|
+{
|
|
+ struct sdtx_client *client = file->private_data;
|
|
+
|
|
+ /* Detach client. */
|
|
+ down_write(&client->ddev->client_lock);
|
|
+ list_del(&client->node);
|
|
+ up_write(&client->ddev->client_lock);
|
|
+
|
|
+ /* Free client. */
|
|
+ sdtx_device_put(client->ddev);
|
|
+ mutex_destroy(&client->read_lock);
|
|
+ kfree(client);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
|
|
+{
|
|
+ struct sdtx_client *client = file->private_data;
|
|
+ struct sdtx_device *ddev = client->ddev;
|
|
+ unsigned int copied;
|
|
+ int status = 0;
|
|
+
|
|
+ if (down_read_killable(&ddev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ /* Make sure we're not shut down. */
|
|
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
|
|
+ up_read(&ddev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+
|
|
+ do {
|
|
+ /* Check availability, wait if necessary. */
|
|
+ if (kfifo_is_empty(&client->buffer)) {
|
|
+ up_read(&ddev->lock);
|
|
+
|
|
+ if (file->f_flags & O_NONBLOCK)
|
|
+ return -EAGAIN;
|
|
+
|
|
+ status = wait_event_interruptible(ddev->waitq,
|
|
+ !kfifo_is_empty(&client->buffer) ||
|
|
+ test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
|
|
+ &ddev->flags));
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ if (down_read_killable(&client->ddev->lock))
|
|
+ return -ERESTARTSYS;
|
|
+
|
|
+ /* Need to check that we're not shut down again. */
|
|
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
|
|
+ up_read(&ddev->lock);
|
|
+ return -ENODEV;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Try to read from FIFO. */
|
|
+ if (mutex_lock_interruptible(&client->read_lock)) {
|
|
+ up_read(&ddev->lock);
|
|
+ return -ERESTARTSYS;
|
|
+ }
|
|
+
|
|
+ status = kfifo_to_user(&client->buffer, buf, count, &copied);
|
|
+ mutex_unlock(&client->read_lock);
|
|
+
|
|
+ if (status < 0) {
|
|
+ up_read(&ddev->lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* We might not have gotten anything, check this here. */
|
|
+ if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
|
|
+ up_read(&ddev->lock);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+ } while (copied == 0);
|
|
+
|
|
+ up_read(&ddev->lock);
|
|
+ return copied;
|
|
+}
|
|
+
|
|
+static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
|
|
+{
|
|
+ struct sdtx_client *client = file->private_data;
|
|
+ __poll_t events = 0;
|
|
+
|
|
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
|
|
+ return EPOLLHUP | EPOLLERR;
|
|
+
|
|
+ poll_wait(file, &client->ddev->waitq, pt);
|
|
+
|
|
+ if (!kfifo_is_empty(&client->buffer))
|
|
+ events |= EPOLLIN | EPOLLRDNORM;
|
|
+
|
|
+ return events;
|
|
+}
|
|
+
|
|
+static int surface_dtx_fasync(int fd, struct file *file, int on)
|
|
+{
|
|
+ struct sdtx_client *client = file->private_data;
|
|
+
|
|
+ return fasync_helper(fd, file, on, &client->fasync);
|
|
+}
|
|
+
|
|
+static const struct file_operations surface_dtx_fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = surface_dtx_open,
|
|
+ .release = surface_dtx_release,
|
|
+ .read = surface_dtx_read,
|
|
+ .poll = surface_dtx_poll,
|
|
+ .fasync = surface_dtx_fasync,
|
|
+ .unlocked_ioctl = surface_dtx_ioctl,
|
|
+ .compat_ioctl = surface_dtx_ioctl,
|
|
+ .llseek = no_llseek,
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Event handling/forwarding. -------------------------------------------- */
|
|
+
|
|
+/*
|
|
+ * The device operation mode is not immediately updated on the EC when the
|
|
+ * base has been connected, i.e. querying the device mode inside the
|
|
+ * connection event callback yields an outdated value. Thus, we can only
|
|
+ * determine the new tablet-mode switch and device mode values after some
|
|
+ * time.
|
|
+ *
|
|
+ * These delays have been chosen by experimenting. We first delay on connect
|
|
+ * events, then check and validate the device mode against the base state and
|
|
+ * if invalid delay again by the "recheck" delay.
|
|
+ */
|
|
+#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
|
|
+#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
|
|
+
|
|
+struct sdtx_status_event {
|
|
+ struct sdtx_event e;
|
|
+ __u16 v;
|
|
+} __packed;
|
|
+
|
|
+struct sdtx_base_info_event {
|
|
+ struct sdtx_event e;
|
|
+ struct sdtx_base_info v;
|
|
+} __packed;
|
|
+
|
|
+union sdtx_generic_event {
|
|
+ struct sdtx_event common;
|
|
+ struct sdtx_status_event status;
|
|
+ struct sdtx_base_info_event base;
|
|
+};
|
|
+
|
|
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
|
|
+
|
|
+/* Must be executed with ddev->write_lock held. */
|
|
+static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
|
|
+{
|
|
+ const size_t len = sizeof(struct sdtx_event) + evt->length;
|
|
+ struct sdtx_client *client;
|
|
+
|
|
+ lockdep_assert_held(&ddev->write_lock);
|
|
+
|
|
+ down_read(&ddev->client_lock);
|
|
+ list_for_each_entry(client, &ddev->client_list, node) {
|
|
+ if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
|
|
+ continue;
|
|
+
|
|
+ if (likely(kfifo_avail(&client->buffer) >= len))
|
|
+ kfifo_in(&client->buffer, (const u8 *)evt, len);
|
|
+ else
|
|
+ dev_warn(ddev->dev, "event buffer overrun\n");
|
|
+
|
|
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
|
|
+ }
|
|
+ up_read(&ddev->client_lock);
|
|
+
|
|
+ wake_up_interruptible(&ddev->waitq);
|
|
+}
|
|
+
|
|
+static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
|
|
+{
|
|
+ struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
|
|
+ union sdtx_generic_event event;
|
|
+ size_t len;
|
|
+
|
|
+ /* Validate event payload length. */
|
|
+ switch (in->command_id) {
|
|
+ case SAM_EVENT_CID_DTX_CONNECTION:
|
|
+ len = 2 * sizeof(u8);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_REQUEST:
|
|
+ len = 0;
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_CANCEL:
|
|
+ len = sizeof(u8);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
|
|
+ len = sizeof(u8);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ if (in->length != len) {
|
|
+ dev_err(ddev->dev,
|
|
+ "unexpected payload size for event %#04x: got %u, expected %zu\n",
|
|
+ in->command_id, in->length, len);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&ddev->write_lock);
|
|
+
|
|
+ /* Translate event. */
|
|
+ switch (in->command_id) {
|
|
+ case SAM_EVENT_CID_DTX_CONNECTION:
|
|
+ clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
|
|
+
|
|
+ /* If state has not changed: do not send new event. */
|
|
+ if (ddev->state.base.state == in->data[0] &&
|
|
+ ddev->state.base.base_id == in->data[1])
|
|
+ goto out;
|
|
+
|
|
+ ddev->state.base.state = in->data[0];
|
|
+ ddev->state.base.base_id = in->data[1];
|
|
+
|
|
+ event.base.e.length = sizeof(struct sdtx_base_info);
|
|
+ event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
|
|
+ event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
|
|
+ event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_REQUEST:
|
|
+ event.common.code = SDTX_EVENT_REQUEST;
|
|
+ event.common.length = 0;
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_CANCEL:
|
|
+ event.status.e.length = sizeof(u16);
|
|
+ event.status.e.code = SDTX_EVENT_CANCEL;
|
|
+ event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_DTX_LATCH_STATUS:
|
|
+ clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
|
|
+
|
|
+ /* If state has not changed: do not send new event. */
|
|
+ if (ddev->state.latch_status == in->data[0])
|
|
+ goto out;
|
|
+
|
|
+ ddev->state.latch_status = in->data[0];
|
|
+
|
|
+ event.status.e.length = sizeof(u16);
|
|
+ event.status.e.code = SDTX_EVENT_LATCH_STATUS;
|
|
+ event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ sdtx_push_event(ddev, &event.common);
|
|
+
|
|
+ /* Update device mode on base connection change. */
|
|
+ if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
|
|
+ unsigned long delay;
|
|
+
|
|
+ delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
|
|
+ sdtx_update_device_mode(ddev, delay);
|
|
+ }
|
|
+
|
|
+out:
|
|
+ mutex_unlock(&ddev->write_lock);
|
|
+ return SSAM_NOTIF_HANDLED;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- State update functions. ----------------------------------------------- */
|
|
+
|
|
+static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
|
|
+{
|
|
+ return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
|
|
+ (mode == SDTX_DEVICE_MODE_TABLET)) ||
|
|
+ ((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
|
|
+ (mode != SDTX_DEVICE_MODE_TABLET));
|
|
+}
|
|
+
|
|
+static void sdtx_device_mode_workfn(struct work_struct *work)
|
|
+{
|
|
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
|
|
+ struct sdtx_status_event event;
|
|
+ struct ssam_bas_base_info base;
|
|
+ int status, tablet;
|
|
+ u8 mode;
|
|
+
|
|
+ /* Get operation mode. */
|
|
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
|
|
+ if (status) {
|
|
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Get base info. */
|
|
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
|
|
+ if (status) {
|
|
+ dev_err(ddev->dev, "failed to get base info: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * In some cases (specifically when attaching the base), the device
|
|
+ * mode isn't updated right away. Thus we check if the device mode
|
|
+ * makes sense for the given base state and try again later if it
|
|
+ * doesn't.
|
|
+ */
|
|
+ if (sdtx_device_mode_invalid(mode, base.state)) {
|
|
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
|
|
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&ddev->write_lock);
|
|
+ clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
|
|
+
|
|
+ /* Avoid sending duplicate device-mode events. */
|
|
+ if (ddev->state.device_mode == mode) {
|
|
+ mutex_unlock(&ddev->write_lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ddev->state.device_mode = mode;
|
|
+
|
|
+ event.e.length = sizeof(u16);
|
|
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
|
|
+ event.v = mode;
|
|
+
|
|
+ sdtx_push_event(ddev, &event.e);
|
|
+
|
|
+ /* Send SW_TABLET_MODE event. */
|
|
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
|
|
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
|
|
+ input_sync(ddev->mode_switch);
|
|
+
|
|
+ mutex_unlock(&ddev->write_lock);
|
|
+}
|
|
+
|
|
+static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
|
|
+{
|
|
+ schedule_delayed_work(&ddev->mode_work, delay);
|
|
+}
|
|
+
|
|
+/* Must be executed with ddev->write_lock held. */
|
|
+static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
|
|
+ struct ssam_bas_base_info info)
|
|
+{
|
|
+ struct sdtx_base_info_event event;
|
|
+
|
|
+ lockdep_assert_held(&ddev->write_lock);
|
|
+
|
|
+ /* Prevent duplicate events. */
|
|
+ if (ddev->state.base.state == info.state &&
|
|
+ ddev->state.base.base_id == info.base_id)
|
|
+ return;
|
|
+
|
|
+ ddev->state.base = info;
|
|
+
|
|
+ event.e.length = sizeof(struct sdtx_base_info);
|
|
+ event.e.code = SDTX_EVENT_BASE_CONNECTION;
|
|
+ event.v.state = sdtx_translate_base_state(ddev, info.state);
|
|
+ event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
|
|
+
|
|
+ sdtx_push_event(ddev, &event.e);
|
|
+}
|
|
+
|
|
+/* Must be executed with ddev->write_lock held. */
|
|
+static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
|
|
+{
|
|
+ struct sdtx_status_event event;
|
|
+ int tablet;
|
|
+
|
|
+ /*
|
|
+ * Note: This function must be called after updating the base state
|
|
+ * via __sdtx_device_state_update_base(), as we rely on the updated
|
|
+ * base state value in the validity check below.
|
|
+ */
|
|
+
|
|
+ lockdep_assert_held(&ddev->write_lock);
|
|
+
|
|
+ if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
|
|
+ dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
|
|
+ sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Prevent duplicate events. */
|
|
+ if (ddev->state.device_mode == mode)
|
|
+ return;
|
|
+
|
|
+ ddev->state.device_mode = mode;
|
|
+
|
|
+ /* Send event. */
|
|
+ event.e.length = sizeof(u16);
|
|
+ event.e.code = SDTX_EVENT_DEVICE_MODE;
|
|
+ event.v = mode;
|
|
+
|
|
+ sdtx_push_event(ddev, &event.e);
|
|
+
|
|
+ /* Send SW_TABLET_MODE event. */
|
|
+ tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
|
|
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
|
|
+ input_sync(ddev->mode_switch);
|
|
+}
|
|
+
|
|
+/* Must be executed with ddev->write_lock held. */
|
|
+static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
|
|
+{
|
|
+ struct sdtx_status_event event;
|
|
+
|
|
+ lockdep_assert_held(&ddev->write_lock);
|
|
+
|
|
+ /* Prevent duplicate events. */
|
|
+ if (ddev->state.latch_status == status)
|
|
+ return;
|
|
+
|
|
+ ddev->state.latch_status = status;
|
|
+
|
|
+ event.e.length = sizeof(struct sdtx_base_info);
|
|
+ event.e.code = SDTX_EVENT_BASE_CONNECTION;
|
|
+ event.v = sdtx_translate_latch_status(ddev, status);
|
|
+
|
|
+ sdtx_push_event(ddev, &event.e);
|
|
+}
|
|
+
|
|
+static void sdtx_device_state_workfn(struct work_struct *work)
|
|
+{
|
|
+ struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
|
|
+ struct ssam_bas_base_info base;
|
|
+ u8 mode, latch;
|
|
+ int status;
|
|
+
|
|
+ /* Mark everything as dirty. */
|
|
+ set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
|
|
+ set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
|
|
+ set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
|
|
+
|
|
+ /*
|
|
+ * Ensure that the state gets marked as dirty before continuing to
|
|
+ * query it. Necessary to ensure that clear_bit() calls in
|
|
+ * sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
|
|
+ * bits if an event is received while updating the state here.
|
|
+ */
|
|
+ smp_mb__after_atomic();
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
|
|
+ if (status) {
|
|
+ dev_err(ddev->dev, "failed to get base state: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
|
|
+ if (status) {
|
|
+ dev_err(ddev->dev, "failed to get device mode: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
|
|
+ if (status) {
|
|
+ dev_err(ddev->dev, "failed to get latch status: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&ddev->write_lock);
|
|
+
|
|
+ /*
|
|
+ * If the respective dirty-bit has been cleared, an event has been
|
|
+ * received, updating this state. The queried state may thus be out of
|
|
+ * date. At this point, we can safely assume that the state provided
|
|
+ * by the event is either up to date, or we're about to receive
|
|
+ * another event updating it.
|
|
+ */
|
|
+
|
|
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
|
|
+ __sdtx_device_state_update_base(ddev, base);
|
|
+
|
|
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
|
|
+ __sdtx_device_state_update_mode(ddev, mode);
|
|
+
|
|
+ if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
|
|
+ __sdtx_device_state_update_latch(ddev, latch);
|
|
+
|
|
+ mutex_unlock(&ddev->write_lock);
|
|
+}
|
|
+
|
|
+static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
|
|
+{
|
|
+ schedule_delayed_work(&ddev->state_work, delay);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Common device initialization. ----------------------------------------- */
|
|
+
|
|
+static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
|
|
+ struct ssam_controller *ctrl)
|
|
+{
|
|
+ int status, tablet_mode;
|
|
+
|
|
+ /* Basic initialization. */
|
|
+ kref_init(&ddev->kref);
|
|
+ init_rwsem(&ddev->lock);
|
|
+ ddev->dev = dev;
|
|
+ ddev->ctrl = ctrl;
|
|
+
|
|
+ ddev->mdev.minor = MISC_DYNAMIC_MINOR;
|
|
+ ddev->mdev.name = "surface_dtx";
|
|
+ ddev->mdev.nodename = "surface/dtx";
|
|
+ ddev->mdev.fops = &surface_dtx_fops;
|
|
+
|
|
+ ddev->notif.base.priority = 1;
|
|
+ ddev->notif.base.fn = sdtx_notifier;
|
|
+ ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
|
|
+ ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
|
|
+ ddev->notif.event.id.instance = 0;
|
|
+ ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
|
|
+ ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ init_waitqueue_head(&ddev->waitq);
|
|
+ mutex_init(&ddev->write_lock);
|
|
+ init_rwsem(&ddev->client_lock);
|
|
+ INIT_LIST_HEAD(&ddev->client_list);
|
|
+
|
|
+ INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
|
|
+ INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
|
|
+
|
|
+ /*
|
|
+ * Get current device state. We want to guarantee that events are only
|
|
+ * sent when state actually changes. Thus we cannot use special
|
|
+ * "uninitialized" values, as that would cause problems when manually
|
|
+ * querying the state in surface_dtx_pm_complete(). I.e. we would not
|
|
+ * be able to detect state changes there if no change event has been
|
|
+ * received between driver initialization and first device suspension.
|
|
+ *
|
|
+ * Note that we also need to do this before registering the event
|
|
+ * notifier, as that may access the state values.
|
|
+ */
|
|
+ status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ /* Set up tablet mode switch. */
|
|
+ ddev->mode_switch = input_allocate_device();
|
|
+ if (!ddev->mode_switch)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
|
|
+ ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
|
|
+ ddev->mode_switch->id.bustype = BUS_HOST;
|
|
+ ddev->mode_switch->dev.parent = ddev->dev;
|
|
+
|
|
+ tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
|
|
+ input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
|
|
+ input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
|
|
+
|
|
+ status = input_register_device(ddev->mode_switch);
|
|
+ if (status) {
|
|
+ input_free_device(ddev->mode_switch);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* Set up event notifier. */
|
|
+ status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
|
|
+ if (status)
|
|
+ goto err_notif;
|
|
+
|
|
+ /* Register miscdevice. */
|
|
+ status = misc_register(&ddev->mdev);
|
|
+ if (status)
|
|
+ goto err_mdev;
|
|
+
|
|
+ /*
|
|
+ * Update device state in case it has changed between getting the
|
|
+ * initial mode and registering the event notifier.
|
|
+ */
|
|
+ sdtx_update_device_state(ddev, 0);
|
|
+ return 0;
|
|
+
|
|
+err_notif:
|
|
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
|
|
+ cancel_delayed_work_sync(&ddev->mode_work);
|
|
+err_mdev:
|
|
+ input_unregister_device(ddev->mode_switch);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
|
|
+{
|
|
+ struct sdtx_device *ddev;
|
|
+ int status;
|
|
+
|
|
+ ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
|
|
+ if (!ddev)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ status = sdtx_device_init(ddev, dev, ctrl);
|
|
+ if (status) {
|
|
+ sdtx_device_put(ddev);
|
|
+ return ERR_PTR(status);
|
|
+ }
|
|
+
|
|
+ return ddev;
|
|
+}
|
|
+
|
|
+static void sdtx_device_destroy(struct sdtx_device *ddev)
|
|
+{
|
|
+ struct sdtx_client *client;
|
|
+
|
|
+ /*
|
|
+ * Mark device as shut-down. Prevent new clients from being added and
|
|
+ * new operations from being executed.
|
|
+ */
|
|
+ set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
|
|
+
|
|
+ /* Disable notifiers, prevent new events from arriving. */
|
|
+ ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
|
|
+
|
|
+ /* Stop mode_work, prevent access to mode_switch. */
|
|
+ cancel_delayed_work_sync(&ddev->mode_work);
|
|
+
|
|
+ /* Stop state_work. */
|
|
+ cancel_delayed_work_sync(&ddev->state_work);
|
|
+
|
|
+ /* With mode_work canceled, we can unregister the mode_switch. */
|
|
+ input_unregister_device(ddev->mode_switch);
|
|
+
|
|
+ /* Wake up async clients. */
|
|
+ down_write(&ddev->client_lock);
|
|
+ list_for_each_entry(client, &ddev->client_list, node) {
|
|
+ kill_fasync(&client->fasync, SIGIO, POLL_HUP);
|
|
+ }
|
|
+ up_write(&ddev->client_lock);
|
|
+
|
|
+ /* Wake up blocking clients. */
|
|
+ wake_up_interruptible(&ddev->waitq);
|
|
+
|
|
+ /*
|
|
+ * Wait for clients to finish their current operation. After this, the
|
|
+ * controller and device references are guaranteed to be no longer in
|
|
+ * use.
|
|
+ */
|
|
+ down_write(&ddev->lock);
|
|
+ ddev->dev = NULL;
|
|
+ ddev->ctrl = NULL;
|
|
+ up_write(&ddev->lock);
|
|
+
|
|
+ /* Finally remove the misc-device. */
|
|
+ misc_deregister(&ddev->mdev);
|
|
+
|
|
+ /*
|
|
+ * We're now guaranteed that sdtx_device_open() won't be called any
|
|
+ * more, so we can now drop out reference.
|
|
+ */
|
|
+ sdtx_device_put(ddev);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- PM ops. --------------------------------------------------------------- */
|
|
+
|
|
+#ifdef CONFIG_PM_SLEEP
|
|
+
|
|
+static void surface_dtx_pm_complete(struct device *dev)
|
|
+{
|
|
+ struct sdtx_device *ddev = dev_get_drvdata(dev);
|
|
+
|
|
+ /*
|
|
+ * Normally, the EC will store events while suspended (i.e. in
|
|
+ * display-off state) and release them when resumed (i.e. transitioned
|
|
+ * to display-on state). During hibernation, however, the EC will be
|
|
+ * shut down and does not store events. Furthermore, events might be
|
|
+ * dropped during prolonged suspension (it is currently unknown how
|
|
+ * big this event buffer is and how it behaves on overruns).
|
|
+ *
|
|
+ * To prevent any problems, we update the device state here. We do
|
|
+ * this delayed to ensure that any events sent by the EC directly
|
|
+ * after resuming will be handled first. The delay below has been
|
|
+ * chosen (experimentally), so that there should be ample time for
|
|
+ * these events to be handled, before we check and, if necessary,
|
|
+ * update the state.
|
|
+ */
|
|
+ sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
|
|
+}
|
|
+
|
|
+static const struct dev_pm_ops surface_dtx_pm_ops = {
|
|
+ .complete = surface_dtx_pm_complete,
|
|
+};
|
|
+
|
|
+#else /* CONFIG_PM_SLEEP */
|
|
+
|
|
+static const struct dev_pm_ops surface_dtx_pm_ops = {};
|
|
+
|
|
+#endif /* CONFIG_PM_SLEEP */
|
|
+
|
|
+
|
|
+/* -- Platform driver. ------------------------------------------------------ */
|
|
+
|
|
+static int surface_dtx_platform_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct ssam_controller *ctrl;
|
|
+ struct sdtx_device *ddev;
|
|
+
|
|
+ /* Link to EC. */
|
|
+ ctrl = ssam_client_bind(&pdev->dev);
|
|
+ if (IS_ERR(ctrl))
|
|
+ return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
|
|
+
|
|
+ ddev = sdtx_device_create(&pdev->dev, ctrl);
|
|
+ if (IS_ERR(ddev))
|
|
+ return PTR_ERR(ddev);
|
|
+
|
|
+ platform_set_drvdata(pdev, ddev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int surface_dtx_platform_remove(struct platform_device *pdev)
|
|
+{
|
|
+ sdtx_device_destroy(platform_get_drvdata(pdev));
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct acpi_device_id surface_dtx_acpi_match[] = {
|
|
+ { "MSHW0133", 0 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
|
|
+
|
|
+static struct platform_driver surface_dtx_platform_driver = {
|
|
+ .probe = surface_dtx_platform_probe,
|
|
+ .remove = surface_dtx_platform_remove,
|
|
+ .driver = {
|
|
+ .name = "surface_dtx_pltf",
|
|
+ .acpi_match_table = surface_dtx_acpi_match,
|
|
+ .pm = &surface_dtx_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+
|
|
+/* -- SSAM device driver. --------------------------------------------------- */
|
|
+
|
|
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
|
|
+
|
|
+static int surface_dtx_ssam_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ struct sdtx_device *ddev;
|
|
+
|
|
+ ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
|
|
+ if (IS_ERR(ddev))
|
|
+ return PTR_ERR(ddev);
|
|
+
|
|
+ ssam_device_set_drvdata(sdev, ddev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void surface_dtx_ssam_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ sdtx_device_destroy(ssam_device_get_drvdata(sdev));
|
|
+}
|
|
+
|
|
+static const struct ssam_device_id surface_dtx_ssam_match[] = {
|
|
+ { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
|
|
+
|
|
+static struct ssam_device_driver surface_dtx_ssam_driver = {
|
|
+ .probe = surface_dtx_ssam_probe,
|
|
+ .remove = surface_dtx_ssam_remove,
|
|
+ .match_table = surface_dtx_ssam_match,
|
|
+ .driver = {
|
|
+ .name = "surface_dtx",
|
|
+ .pm = &surface_dtx_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int ssam_dtx_driver_register(void)
|
|
+{
|
|
+ return ssam_device_driver_register(&surface_dtx_ssam_driver);
|
|
+}
|
|
+
|
|
+static void ssam_dtx_driver_unregister(void)
|
|
+{
|
|
+ ssam_device_driver_unregister(&surface_dtx_ssam_driver);
|
|
+}
|
|
+
|
|
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
|
|
+
|
|
+static int ssam_dtx_driver_register(void)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void ssam_dtx_driver_unregister(void)
|
|
+{
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
|
|
+
|
|
+
|
|
+/* -- Module setup. --------------------------------------------------------- */
|
|
+
|
|
+static int __init surface_dtx_init(void)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = ssam_dtx_driver_register();
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = platform_driver_register(&surface_dtx_platform_driver);
|
|
+ if (status)
|
|
+ ssam_dtx_driver_unregister();
|
|
+
|
|
+ return status;
|
|
+}
|
|
+module_init(surface_dtx_init);
|
|
+
|
|
+static void __exit surface_dtx_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&surface_dtx_platform_driver);
|
|
+ ssam_dtx_driver_unregister();
|
|
+}
|
|
+module_exit(surface_dtx_exit);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/platform/x86/surface_perfmode.c b/drivers/platform/x86/surface_perfmode.c
|
|
new file mode 100644
|
|
index 000000000000..3b92a43f8606
|
|
--- /dev/null
|
|
+++ b/drivers/platform/x86/surface_perfmode.c
|
|
@@ -0,0 +1,122 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Surface performance-mode driver.
|
|
+ *
|
|
+ * Provides a user-space interface for the performance mode control provided
|
|
+ * by the Surface System Aggregator Module (SSAM), influencing cooling
|
|
+ * behavior of the device and potentially managing power limits.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/sysfs.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+enum sam_perf_mode {
|
|
+ SAM_PERF_MODE_NORMAL = 1,
|
|
+ SAM_PERF_MODE_BATTERY = 2,
|
|
+ SAM_PERF_MODE_PERF1 = 3,
|
|
+ SAM_PERF_MODE_PERF2 = 4,
|
|
+
|
|
+ __SAM_PERF_MODE__MIN = 1,
|
|
+ __SAM_PERF_MODE__MAX = 4,
|
|
+};
|
|
+
|
|
+struct ssam_perf_info {
|
|
+ __le32 mode;
|
|
+ __le16 unknown1;
|
|
+ __le16 unknown2;
|
|
+} __packed;
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_tmp_perf_mode_get, struct ssam_perf_info, {
|
|
+ .target_category = SSAM_SSH_TC_TMP,
|
|
+ .command_id = 0x02,
|
|
+});
|
|
+
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_perf_mode_set, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_TMP,
|
|
+ .command_id = 0x03,
|
|
+});
|
|
+
|
|
+static int ssam_tmp_perf_mode_set(struct ssam_device *sdev, u32 mode)
|
|
+{
|
|
+ __le32 mode_le = cpu_to_le32(mode);
|
|
+
|
|
+ if (mode < __SAM_PERF_MODE__MIN || mode > __SAM_PERF_MODE__MAX)
|
|
+ return -EINVAL;
|
|
+
|
|
+ return ssam_retry(__ssam_tmp_perf_mode_set, sdev, &mode_le);
|
|
+}
|
|
+
|
|
+static ssize_t perf_mode_show(struct device *dev, struct device_attribute *attr,
|
|
+ char *data)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+ struct ssam_perf_info info;
|
|
+ int status;
|
|
+
|
|
+ status = ssam_retry(ssam_tmp_perf_mode_get, sdev, &info);
|
|
+ if (status) {
|
|
+ dev_err(dev, "failed to get current performance mode: %d\n",
|
|
+ status);
|
|
+ return -EIO;
|
|
+ }
|
|
+
|
|
+ return sprintf(data, "%d\n", le32_to_cpu(info.mode));
|
|
+}
|
|
+
|
|
+static ssize_t perf_mode_store(struct device *dev, struct device_attribute *attr,
|
|
+ const char *data, size_t count)
|
|
+{
|
|
+ struct ssam_device *sdev = to_ssam_device(dev);
|
|
+ int perf_mode;
|
|
+ int status;
|
|
+
|
|
+ status = kstrtoint(data, 0, &perf_mode);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ status = ssam_tmp_perf_mode_set(sdev, perf_mode);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static const DEVICE_ATTR_RW(perf_mode);
|
|
+
|
|
+static int surface_sam_sid_perfmode_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ return sysfs_create_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
|
|
+}
|
|
+
|
|
+static void surface_sam_sid_perfmode_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ sysfs_remove_file(&sdev->dev.kobj, &dev_attr_perf_mode.attr);
|
|
+}
|
|
+
|
|
+static const struct ssam_device_id ssam_perfmode_match[] = {
|
|
+ { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(ssam, ssam_perfmode_match);
|
|
+
|
|
+static struct ssam_device_driver surface_sam_sid_perfmode = {
|
|
+ .probe = surface_sam_sid_perfmode_probe,
|
|
+ .remove = surface_sam_sid_perfmode_remove,
|
|
+ .match_table = ssam_perfmode_match,
|
|
+ .driver = {
|
|
+ .name = "surface_performance_mode",
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_ssam_device_driver(surface_sam_sid_perfmode);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Performance mode interface for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
|
|
index 1d656aa2c6d6..6876d5b4d6ac 100644
|
|
--- a/drivers/power/supply/Kconfig
|
|
+++ b/drivers/power/supply/Kconfig
|
|
@@ -646,4 +646,36 @@ config CHARGER_CROS_USBPD
|
|
what is connected to USB PD ports from the EC and converts
|
|
that into power_supply properties.
|
|
|
|
+config BATTERY_SURFACE
|
|
+ tristate "Battery driver for 7th-generation Microsoft Surface devices"
|
|
+ depends on SURFACE_AGGREGATOR_REGISTRY
|
|
+ help
|
|
+ Driver for battery devices connected via/managed by the Surface System
|
|
+ Aggregator Module (SSAM).
|
|
+
|
|
+ This driver provides battery-information and -status support for
|
|
+ Surface devices where said data is not exposed via the standard ACPI
|
|
+ devices. On those models (7th-generation), battery-information is
|
|
+ instead handled directly via SSAM client devices and this driver.
|
|
+
|
|
+ Say M or Y here to include battery status support for 7th-generation
|
|
+ Microsoft Surface devices, i.e. Surface Pro 7, Surface Laptop 3,
|
|
+ Surface Book 3, and Surface Laptop Go.
|
|
+
|
|
+config CHARGER_SURFACE
|
|
+ tristate "AC driver for 7th-generation Microsoft Surface devices"
|
|
+ depends on SURFACE_AGGREGATOR_REGISTRY
|
|
+ help
|
|
+ Driver for AC devices connected via/managed by the Surface System
|
|
+ Aggregator Module (SSAM).
|
|
+
|
|
+ This driver provides AC-information and -status support for Surface
|
|
+ devices where said data is not exposed via the standard ACPI devices.
|
|
+ On those models (7th-generation), AC-information is instead handled
|
|
+ directly via a SSAM client device and this driver.
|
|
+
|
|
+ Say M or Y here to include AC status support for 7th-generation
|
|
+ Microsoft Surface devices, i.e. Surface Pro 7, Surface Laptop 3,
|
|
+ Surface Book 3, and Surface Laptop Go.
|
|
+
|
|
endif # POWER_SUPPLY
|
|
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
|
|
index a26b402c45d9..c8dd853ee382 100644
|
|
--- a/drivers/power/supply/Makefile
|
|
+++ b/drivers/power/supply/Makefile
|
|
@@ -85,3 +85,5 @@ obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
|
|
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
|
|
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
|
|
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
|
|
+obj-$(CONFIG_BATTERY_SURFACE) += surface_battery.o
|
|
+obj-$(CONFIG_CHARGER_SURFACE) += surface_charger.o
|
|
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
|
|
new file mode 100644
|
|
index 000000000000..1e48b2363f23
|
|
--- /dev/null
|
|
+++ b/drivers/power/supply/surface_battery.c
|
|
@@ -0,0 +1,816 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * Battery driver for 7th-generation Microsoft Surface devices via Surface
|
|
+ * System Aggregator Module (SSAM).
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/jiffies.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/power_supply.h>
|
|
+#include <linux/sysfs.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/workqueue.h>
|
|
+
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+
|
|
+/* -- SAM interface. -------------------------------------------------------- */
|
|
+
|
|
+enum sam_event_cid_bat {
|
|
+ SAM_EVENT_CID_BAT_BIX = 0x15,
|
|
+ SAM_EVENT_CID_BAT_BST = 0x16,
|
|
+ SAM_EVENT_CID_BAT_ADP = 0x17,
|
|
+ SAM_EVENT_CID_BAT_PROT = 0x18,
|
|
+ SAM_EVENT_CID_BAT_DPTF = 0x53,
|
|
+};
|
|
+
|
|
+enum sam_battery_sta {
|
|
+ SAM_BATTERY_STA_OK = 0x0f,
|
|
+ SAM_BATTERY_STA_PRESENT = 0x10,
|
|
+};
|
|
+
|
|
+enum sam_battery_state {
|
|
+ SAM_BATTERY_STATE_DISCHARGING = BIT(0),
|
|
+ SAM_BATTERY_STATE_CHARGING = BIT(1),
|
|
+ SAM_BATTERY_STATE_CRITICAL = BIT(2),
|
|
+};
|
|
+
|
|
+enum sam_battery_power_unit {
|
|
+ SAM_BATTERY_POWER_UNIT_mW = 0,
|
|
+ SAM_BATTERY_POWER_UNIT_mA = 1,
|
|
+};
|
|
+
|
|
+/* Equivalent to data returned in ACPI _BIX method, revision 0. */
|
|
+struct spwr_bix {
|
|
+ u8 revision;
|
|
+ __le32 power_unit;
|
|
+ __le32 design_cap;
|
|
+ __le32 last_full_charge_cap;
|
|
+ __le32 technology;
|
|
+ __le32 design_voltage;
|
|
+ __le32 design_cap_warn;
|
|
+ __le32 design_cap_low;
|
|
+ __le32 cycle_count;
|
|
+ __le32 measurement_accuracy;
|
|
+ __le32 max_sampling_time;
|
|
+ __le32 min_sampling_time;
|
|
+ __le32 max_avg_interval;
|
|
+ __le32 min_avg_interval;
|
|
+ __le32 bat_cap_granularity_1;
|
|
+ __le32 bat_cap_granularity_2;
|
|
+ __u8 model[21];
|
|
+ __u8 serial[11];
|
|
+ __u8 type[5];
|
|
+ __u8 oem_info[21];
|
|
+} __packed;
|
|
+
|
|
+/* Equivalent to data returned in ACPI _BST method. */
|
|
+struct spwr_bst {
|
|
+ __le32 state;
|
|
+ __le32 present_rate;
|
|
+ __le32 remaining_cap;
|
|
+ __le32 present_voltage;
|
|
+} __packed;
|
|
+
|
|
+#define SPWR_BIX_REVISION 0
|
|
+#define SPWR_BATTERY_VALUE_UNKNOWN 0xffffffff
|
|
+
|
|
+/* Get battery status (_STA) */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x01,
|
|
+});
|
|
+
|
|
+/* Get battery static information (_BIX). */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bix, struct spwr_bix, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x02,
|
|
+});
|
|
+
|
|
+/* Get battery dynamic information (_BST). */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_bst, struct spwr_bst, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x03,
|
|
+});
|
|
+
|
|
+/* Set battery trip point (_BTP). */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_W(ssam_bat_set_btp, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x04,
|
|
+});
|
|
+
|
|
+
|
|
+/* -- Device structures. ---------------------------------------------------- */
|
|
+
|
|
+struct spwr_psy_properties {
|
|
+ const char *name;
|
|
+ struct ssam_event_registry registry;
|
|
+};
|
|
+
|
|
+struct spwr_battery_device {
|
|
+ struct ssam_device *sdev;
|
|
+
|
|
+ char name[32];
|
|
+ struct power_supply *psy;
|
|
+ struct power_supply_desc psy_desc;
|
|
+
|
|
+ struct delayed_work update_work;
|
|
+
|
|
+ struct ssam_event_notifier notif;
|
|
+
|
|
+ struct mutex lock; /* Guards access to state data below. */
|
|
+ unsigned long timestamp;
|
|
+
|
|
+ __le32 sta;
|
|
+ struct spwr_bix bix;
|
|
+ struct spwr_bst bst;
|
|
+ u32 alarm;
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Module parameters. ---------------------------------------------------- */
|
|
+
|
|
+static unsigned int cache_time = 1000;
|
|
+module_param(cache_time, uint, 0644);
|
|
+MODULE_PARM_DESC(cache_time, "battery state caching time in milliseconds [default: 1000]");
|
|
+
|
|
+
|
|
+/* -- State management. ----------------------------------------------------- */
|
|
+
|
|
+/*
|
|
+ * Delay for battery update quirk. See spwr_external_power_changed() below
|
|
+ * for more details.
|
|
+ */
|
|
+#define SPWR_AC_BAT_UPDATE_DELAY msecs_to_jiffies(5000)
|
|
+
|
|
+static bool spwr_battery_present(struct spwr_battery_device *bat)
|
|
+{
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ return le32_to_cpu(bat->sta) & SAM_BATTERY_STA_PRESENT;
|
|
+}
|
|
+
|
|
+static int spwr_battery_load_sta(struct spwr_battery_device *bat)
|
|
+{
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ return ssam_retry(ssam_bat_get_sta, bat->sdev, &bat->sta);
|
|
+}
|
|
+
|
|
+static int spwr_battery_load_bix(struct spwr_battery_device *bat)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (!spwr_battery_present(bat))
|
|
+ return 0;
|
|
+
|
|
+ status = ssam_retry(ssam_bat_get_bix, bat->sdev, &bat->bix);
|
|
+
|
|
+ /* Enforce NULL terminated strings in case anything goes wrong... */
|
|
+ bat->bix.model[ARRAY_SIZE(bat->bix.model) - 1] = 0;
|
|
+ bat->bix.serial[ARRAY_SIZE(bat->bix.serial) - 1] = 0;
|
|
+ bat->bix.type[ARRAY_SIZE(bat->bix.type) - 1] = 0;
|
|
+ bat->bix.oem_info[ARRAY_SIZE(bat->bix.oem_info) - 1] = 0;
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int spwr_battery_load_bst(struct spwr_battery_device *bat)
|
|
+{
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (!spwr_battery_present(bat))
|
|
+ return 0;
|
|
+
|
|
+ return ssam_retry(ssam_bat_get_bst, bat->sdev, &bat->bst);
|
|
+}
|
|
+
|
|
+static int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value)
|
|
+{
|
|
+ __le32 value_le = cpu_to_le32(value);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ bat->alarm = value;
|
|
+ return ssam_retry(ssam_bat_set_btp, bat->sdev, &value_le);
|
|
+}
|
|
+
|
|
+static int spwr_battery_update_bst_unlocked(struct spwr_battery_device *bat, bool cached)
|
|
+{
|
|
+ unsigned long cache_deadline = bat->timestamp + msecs_to_jiffies(cache_time);
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (cached && bat->timestamp && time_is_after_jiffies(cache_deadline))
|
|
+ return 0;
|
|
+
|
|
+ status = spwr_battery_load_sta(bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = spwr_battery_load_bst(bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ bat->timestamp = jiffies;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int spwr_battery_update_bst(struct spwr_battery_device *bat, bool cached)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&bat->lock);
|
|
+ status = spwr_battery_update_bst_unlocked(bat, cached);
|
|
+ mutex_unlock(&bat->lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int spwr_battery_update_bix_unlocked(struct spwr_battery_device *bat)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ status = spwr_battery_load_sta(bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = spwr_battery_load_bix(bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ status = spwr_battery_load_bst(bat);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if (bat->bix.revision != SPWR_BIX_REVISION)
|
|
+ dev_warn(&bat->sdev->dev, "unsupported battery revision: %u\n", bat->bix.revision);
|
|
+
|
|
+ bat->timestamp = jiffies;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static u32 sprw_battery_get_full_cap_safe(struct spwr_battery_device *bat)
|
|
+{
|
|
+ u32 full_cap = get_unaligned_le32(&bat->bix.last_full_charge_cap);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ full_cap = get_unaligned_le32(&bat->bix.design_cap);
|
|
+
|
|
+ return full_cap;
|
|
+}
|
|
+
|
|
+static bool spwr_battery_is_full(struct spwr_battery_device *bat)
|
|
+{
|
|
+ u32 state = get_unaligned_le32(&bat->bst.state);
|
|
+ u32 full_cap = sprw_battery_get_full_cap_safe(bat);
|
|
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ return full_cap != SPWR_BATTERY_VALUE_UNKNOWN && full_cap != 0 &&
|
|
+ remaining_cap != SPWR_BATTERY_VALUE_UNKNOWN &&
|
|
+ remaining_cap >= full_cap &&
|
|
+ state == 0;
|
|
+}
|
|
+
|
|
+static int spwr_battery_recheck_full(struct spwr_battery_device *bat)
|
|
+{
|
|
+ bool present;
|
|
+ u32 unit;
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&bat->lock);
|
|
+ unit = get_unaligned_le32(&bat->bix.power_unit);
|
|
+ present = spwr_battery_present(bat);
|
|
+
|
|
+ status = spwr_battery_update_bix_unlocked(bat);
|
|
+ if (status)
|
|
+ goto out;
|
|
+
|
|
+ /* If battery has been attached, (re-)initialize alarm. */
|
|
+ if (!present && spwr_battery_present(bat)) {
|
|
+ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
|
|
+
|
|
+ status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
|
|
+ if (status)
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Warn if the unit has changed. This is something we genuinely don't
|
|
+ * expect to happen, so make this a big warning. If it does, we'll
|
|
+ * need to add support for it.
|
|
+ */
|
|
+ WARN_ON(unit != get_unaligned_le32(&bat->bix.power_unit));
|
|
+
|
|
+out:
|
|
+ mutex_unlock(&bat->lock);
|
|
+
|
|
+ if (!status)
|
|
+ power_supply_changed(bat->psy);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int spwr_battery_recheck_status(struct spwr_battery_device *bat)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = spwr_battery_update_bst(bat, false);
|
|
+ if (!status)
|
|
+ power_supply_changed(bat->psy);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static u32 spwr_notify_bat(struct ssam_event_notifier *nf, const struct ssam_event *event)
|
|
+{
|
|
+ struct spwr_battery_device *bat = container_of(nf, struct spwr_battery_device, notif);
|
|
+ int status;
|
|
+
|
|
+ /*
|
|
+ * We cannot use strict matching when registering the notifier as the
|
|
+ * EC expects us to register it against instance ID 0. Strict matching
|
|
+ * would thus drop events, as those may have non-zero instance IDs in
|
|
+ * this subsystem. So we need to check the instance ID of the event
|
|
+ * here manually.
|
|
+ */
|
|
+ if (event->instance_id != bat->sdev->uid.instance)
|
|
+ return 0;
|
|
+
|
|
+ dev_dbg(&bat->sdev->dev, "power event (cid = %#04x, iid = %#04x, tid = %#04x)\n",
|
|
+ event->command_id, event->instance_id, event->target_id);
|
|
+
|
|
+ switch (event->command_id) {
|
|
+ case SAM_EVENT_CID_BAT_BIX:
|
|
+ status = spwr_battery_recheck_full(bat);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_BST:
|
|
+ status = spwr_battery_recheck_status(bat);
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_PROT:
|
|
+ /*
|
|
+ * TODO: Implement support for battery protection status change
|
|
+ * event.
|
|
+ */
|
|
+ status = 0;
|
|
+ break;
|
|
+
|
|
+ case SAM_EVENT_CID_BAT_DPTF:
|
|
+ /*
|
|
+ * TODO: Implement support for DPTF event.
|
|
+ */
|
|
+ status = 0;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
|
|
+}
|
|
+
|
|
+static void spwr_battery_update_bst_workfn(struct work_struct *work)
|
|
+{
|
|
+ struct delayed_work *dwork = to_delayed_work(work);
|
|
+ struct spwr_battery_device *bat;
|
|
+ int status;
|
|
+
|
|
+ bat = container_of(dwork, struct spwr_battery_device, update_work);
|
|
+
|
|
+ status = spwr_battery_update_bst(bat, false);
|
|
+ if (status) {
|
|
+ dev_err(&bat->sdev->dev, "failed to update battery state: %d\n", status);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ power_supply_changed(bat->psy);
|
|
+}
|
|
+
|
|
+static void spwr_external_power_changed(struct power_supply *psy)
|
|
+{
|
|
+ struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
|
|
+
|
|
+ /*
|
|
+ * Handle battery update quirk: When the battery is fully charged (or
|
|
+ * charged up to the limit imposed by the UEFI battery limit) and the
|
|
+ * adapter is plugged in or removed, the EC does not send a separate
|
|
+ * event for the state (charging/discharging) change. Furthermore it
|
|
+ * may take some time until the state is updated on the battery.
|
|
+ * Schedule an update to solve this.
|
|
+ */
|
|
+
|
|
+ schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Properties. ----------------------------------------------------------- */
|
|
+
|
|
+static enum power_supply_property spwr_battery_props_chg[] = {
|
|
+ POWER_SUPPLY_PROP_STATUS,
|
|
+ POWER_SUPPLY_PROP_PRESENT,
|
|
+ POWER_SUPPLY_PROP_TECHNOLOGY,
|
|
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
|
|
+ POWER_SUPPLY_PROP_CURRENT_NOW,
|
|
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
|
|
+ POWER_SUPPLY_PROP_CHARGE_FULL,
|
|
+ POWER_SUPPLY_PROP_CHARGE_NOW,
|
|
+ POWER_SUPPLY_PROP_CAPACITY,
|
|
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
|
|
+ POWER_SUPPLY_PROP_MODEL_NAME,
|
|
+ POWER_SUPPLY_PROP_MANUFACTURER,
|
|
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
|
|
+};
|
|
+
|
|
+static enum power_supply_property spwr_battery_props_eng[] = {
|
|
+ POWER_SUPPLY_PROP_STATUS,
|
|
+ POWER_SUPPLY_PROP_PRESENT,
|
|
+ POWER_SUPPLY_PROP_TECHNOLOGY,
|
|
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
|
|
+ POWER_SUPPLY_PROP_POWER_NOW,
|
|
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
|
|
+ POWER_SUPPLY_PROP_ENERGY_FULL,
|
|
+ POWER_SUPPLY_PROP_ENERGY_NOW,
|
|
+ POWER_SUPPLY_PROP_CAPACITY,
|
|
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
|
|
+ POWER_SUPPLY_PROP_MODEL_NAME,
|
|
+ POWER_SUPPLY_PROP_MANUFACTURER,
|
|
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
|
|
+};
|
|
+
|
|
+static int spwr_battery_prop_status(struct spwr_battery_device *bat)
|
|
+{
|
|
+ u32 state = get_unaligned_le32(&bat->bst.state);
|
|
+ u32 present_rate = get_unaligned_le32(&bat->bst.present_rate);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (state & SAM_BATTERY_STATE_DISCHARGING)
|
|
+ return POWER_SUPPLY_STATUS_DISCHARGING;
|
|
+
|
|
+ if (state & SAM_BATTERY_STATE_CHARGING)
|
|
+ return POWER_SUPPLY_STATUS_CHARGING;
|
|
+
|
|
+ if (spwr_battery_is_full(bat))
|
|
+ return POWER_SUPPLY_STATUS_FULL;
|
|
+
|
|
+ if (present_rate == 0)
|
|
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
|
|
+
|
|
+ return POWER_SUPPLY_STATUS_UNKNOWN;
|
|
+}
|
|
+
|
|
+static int spwr_battery_prop_technology(struct spwr_battery_device *bat)
|
|
+{
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (!strcasecmp("NiCd", bat->bix.type))
|
|
+ return POWER_SUPPLY_TECHNOLOGY_NiCd;
|
|
+
|
|
+ if (!strcasecmp("NiMH", bat->bix.type))
|
|
+ return POWER_SUPPLY_TECHNOLOGY_NiMH;
|
|
+
|
|
+ if (!strcasecmp("LION", bat->bix.type))
|
|
+ return POWER_SUPPLY_TECHNOLOGY_LION;
|
|
+
|
|
+ if (!strncasecmp("LI-ION", bat->bix.type, 6))
|
|
+ return POWER_SUPPLY_TECHNOLOGY_LION;
|
|
+
|
|
+ if (!strcasecmp("LiP", bat->bix.type))
|
|
+ return POWER_SUPPLY_TECHNOLOGY_LIPO;
|
|
+
|
|
+ return POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
|
|
+}
|
|
+
|
|
+static int spwr_battery_prop_capacity(struct spwr_battery_device *bat)
|
|
+{
|
|
+ u32 full_cap = sprw_battery_get_full_cap_safe(bat);
|
|
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (full_cap == 0 || full_cap == SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ return -ENODATA;
|
|
+
|
|
+ if (remaining_cap == SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ return -ENODATA;
|
|
+
|
|
+ return remaining_cap * 100 / full_cap;
|
|
+}
|
|
+
|
|
+static int spwr_battery_prop_capacity_level(struct spwr_battery_device *bat)
|
|
+{
|
|
+ u32 state = get_unaligned_le32(&bat->bst.state);
|
|
+ u32 remaining_cap = get_unaligned_le32(&bat->bst.remaining_cap);
|
|
+
|
|
+ lockdep_assert_held(&bat->lock);
|
|
+
|
|
+ if (state & SAM_BATTERY_STATE_CRITICAL)
|
|
+ return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
|
|
+
|
|
+ if (spwr_battery_is_full(bat))
|
|
+ return POWER_SUPPLY_CAPACITY_LEVEL_FULL;
|
|
+
|
|
+ if (remaining_cap <= bat->alarm)
|
|
+ return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
|
|
+
|
|
+ return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
|
|
+}
|
|
+
|
|
+static int spwr_battery_get_property(struct power_supply *psy, enum power_supply_property psp,
|
|
+ union power_supply_propval *val)
|
|
+{
|
|
+ struct spwr_battery_device *bat = power_supply_get_drvdata(psy);
|
|
+ u32 value;
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&bat->lock);
|
|
+
|
|
+ status = spwr_battery_update_bst_unlocked(bat, true);
|
|
+ if (status)
|
|
+ goto out;
|
|
+
|
|
+ /* Abort if battery is not present. */
|
|
+ if (!spwr_battery_present(bat) && psp != POWER_SUPPLY_PROP_PRESENT) {
|
|
+ status = -ENODEV;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ switch (psp) {
|
|
+ case POWER_SUPPLY_PROP_STATUS:
|
|
+ val->intval = spwr_battery_prop_status(bat);
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_PRESENT:
|
|
+ val->intval = spwr_battery_present(bat);
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
|
|
+ val->intval = spwr_battery_prop_technology(bat);
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
|
|
+ value = get_unaligned_le32(&bat->bix.cycle_count);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
|
|
+ value = get_unaligned_le32(&bat->bix.design_voltage);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
|
|
+ value = get_unaligned_le32(&bat->bst.present_voltage);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
|
|
+ case POWER_SUPPLY_PROP_POWER_NOW:
|
|
+ value = get_unaligned_le32(&bat->bst.present_rate);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
|
|
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
|
|
+ value = get_unaligned_le32(&bat->bix.design_cap);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
|
|
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
|
|
+ value = get_unaligned_le32(&bat->bix.last_full_charge_cap);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
|
|
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
|
|
+ value = get_unaligned_le32(&bat->bst.remaining_cap);
|
|
+ if (value != SPWR_BATTERY_VALUE_UNKNOWN)
|
|
+ val->intval = value * 1000;
|
|
+ else
|
|
+ status = -ENODATA;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CAPACITY:
|
|
+ val->intval = spwr_battery_prop_capacity(bat);
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
|
|
+ val->intval = spwr_battery_prop_capacity_level(bat);
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_MODEL_NAME:
|
|
+ val->strval = bat->bix.model;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_MANUFACTURER:
|
|
+ val->strval = bat->bix.oem_info;
|
|
+ break;
|
|
+
|
|
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
|
|
+ val->strval = bat->bix.serial;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ status = -EINVAL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ mutex_unlock(&bat->lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Device setup. --------------------------------------------------------- */
|
|
+
|
|
+static void spwr_battery_init(struct spwr_battery_device *bat, struct ssam_device *sdev,
|
|
+ struct ssam_event_registry registry, const char *name)
|
|
+{
|
|
+ mutex_init(&bat->lock);
|
|
+ strncpy(bat->name, name, ARRAY_SIZE(bat->name) - 1);
|
|
+
|
|
+ bat->sdev = sdev;
|
|
+
|
|
+ bat->notif.base.priority = 1;
|
|
+ bat->notif.base.fn = spwr_notify_bat;
|
|
+ bat->notif.event.reg = registry;
|
|
+ bat->notif.event.id.target_category = sdev->uid.category;
|
|
+ bat->notif.event.id.instance = 0; /* need to register with instance 0 */
|
|
+ bat->notif.event.mask = SSAM_EVENT_MASK_TARGET;
|
|
+ bat->notif.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ bat->psy_desc.name = bat->name;
|
|
+ bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY;
|
|
+ bat->psy_desc.get_property = spwr_battery_get_property;
|
|
+
|
|
+ INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn);
|
|
+}
|
|
+
|
|
+static int spwr_battery_register(struct spwr_battery_device *bat)
|
|
+{
|
|
+ struct power_supply_config psy_cfg = {};
|
|
+ __le32 sta;
|
|
+ int status;
|
|
+
|
|
+ /* Make sure the device is there and functioning properly. */
|
|
+ status = ssam_retry(ssam_bat_get_sta, bat->sdev, &sta);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
|
|
+ return -ENODEV;
|
|
+
|
|
+ /* Satisfy lockdep although we are in an exclusive context here. */
|
|
+ mutex_lock(&bat->lock);
|
|
+
|
|
+ status = spwr_battery_update_bix_unlocked(bat);
|
|
+ if (status) {
|
|
+ mutex_unlock(&bat->lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ if (spwr_battery_present(bat)) {
|
|
+ u32 cap_warn = get_unaligned_le32(&bat->bix.design_cap_warn);
|
|
+
|
|
+ status = spwr_battery_set_alarm_unlocked(bat, cap_warn);
|
|
+ if (status) {
|
|
+ mutex_unlock(&bat->lock);
|
|
+ return status;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&bat->lock);
|
|
+
|
|
+ bat->psy_desc.external_power_changed = spwr_external_power_changed;
|
|
+
|
|
+ switch (get_unaligned_le32(&bat->bix.power_unit)) {
|
|
+ case SAM_BATTERY_POWER_UNIT_mW:
|
|
+ bat->psy_desc.properties = spwr_battery_props_eng;
|
|
+ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_eng);
|
|
+ break;
|
|
+
|
|
+ case SAM_BATTERY_POWER_UNIT_mA:
|
|
+ bat->psy_desc.properties = spwr_battery_props_chg;
|
|
+ bat->psy_desc.num_properties = ARRAY_SIZE(spwr_battery_props_chg);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ dev_err(&bat->sdev->dev, "unsupported battery power unit: %u\n",
|
|
+ get_unaligned_le32(&bat->bix.power_unit));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ psy_cfg.drv_data = bat;
|
|
+ bat->psy = devm_power_supply_register(&bat->sdev->dev, &bat->psy_desc, &psy_cfg);
|
|
+ if (IS_ERR(bat->psy))
|
|
+ return PTR_ERR(bat->psy);
|
|
+
|
|
+ return ssam_notifier_register(bat->sdev->ctrl, &bat->notif);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Driver setup. --------------------------------------------------------- */
|
|
+
|
|
+static int __maybe_unused surface_battery_resume(struct device *dev)
|
|
+{
|
|
+ return spwr_battery_recheck_full(dev_get_drvdata(dev));
|
|
+}
|
|
+static SIMPLE_DEV_PM_OPS(surface_battery_pm_ops, NULL, surface_battery_resume);
|
|
+
|
|
+static int surface_battery_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ const struct spwr_psy_properties *p;
|
|
+ struct spwr_battery_device *bat;
|
|
+
|
|
+ p = ssam_device_get_match_data(sdev);
|
|
+ if (!p)
|
|
+ return -ENODEV;
|
|
+
|
|
+ bat = devm_kzalloc(&sdev->dev, sizeof(*bat), GFP_KERNEL);
|
|
+ if (!bat)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ spwr_battery_init(bat, sdev, p->registry, p->name);
|
|
+ ssam_device_set_drvdata(sdev, bat);
|
|
+
|
|
+ return spwr_battery_register(bat);
|
|
+}
|
|
+
|
|
+static void surface_battery_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ struct spwr_battery_device *bat = ssam_device_get_drvdata(sdev);
|
|
+
|
|
+ ssam_notifier_unregister(sdev->ctrl, &bat->notif);
|
|
+ cancel_delayed_work_sync(&bat->update_work);
|
|
+}
|
|
+
|
|
+static const struct spwr_psy_properties spwr_psy_props_bat1 = {
|
|
+ .name = "BAT1",
|
|
+ .registry = SSAM_EVENT_REGISTRY_SAM,
|
|
+};
|
|
+
|
|
+static const struct spwr_psy_properties spwr_psy_props_bat2_sb3 = {
|
|
+ .name = "BAT2",
|
|
+ .registry = SSAM_EVENT_REGISTRY_KIP,
|
|
+};
|
|
+
|
|
+static const struct ssam_device_id surface_battery_match[] = {
|
|
+ { SSAM_SDEV(BAT, 0x01, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1 },
|
|
+ { SSAM_SDEV(BAT, 0x02, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(ssam, surface_battery_match);
|
|
+
|
|
+static struct ssam_device_driver surface_battery_driver = {
|
|
+ .probe = surface_battery_probe,
|
|
+ .remove = surface_battery_remove,
|
|
+ .match_table = surface_battery_match,
|
|
+ .driver = {
|
|
+ .name = "surface_battery",
|
|
+ .pm = &surface_battery_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_ssam_device_driver(surface_battery_driver);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("Battery driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
|
|
new file mode 100644
|
|
index 000000000000..1b759c2a31ce
|
|
--- /dev/null
|
|
+++ b/drivers/power/supply/surface_charger.c
|
|
@@ -0,0 +1,282 @@
|
|
+// SPDX-License-Identifier: GPL-2.0+
|
|
+/*
|
|
+ * AC driver for 7th-generation Microsoft Surface devices via Surface System
|
|
+ * Aggregator Module (SSAM).
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#include <asm/unaligned.h>
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/mutex.h>
|
|
+#include <linux/power_supply.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/device.h>
|
|
+
|
|
+
|
|
+/* -- SAM interface. -------------------------------------------------------- */
|
|
+
|
|
+enum sam_event_cid_bat {
|
|
+ SAM_EVENT_CID_BAT_ADP = 0x17,
|
|
+};
|
|
+
|
|
+enum sam_battery_sta {
|
|
+ SAM_BATTERY_STA_OK = 0x0f,
|
|
+ SAM_BATTERY_STA_PRESENT = 0x10,
|
|
+};
|
|
+
|
|
+/* Get battery status (_STA). */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_sta, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x01,
|
|
+});
|
|
+
|
|
+/* Get platform power source for battery (_PSR / DPTF PSRC). */
|
|
+static SSAM_DEFINE_SYNC_REQUEST_CL_R(ssam_bat_get_psrc, __le32, {
|
|
+ .target_category = SSAM_SSH_TC_BAT,
|
|
+ .command_id = 0x0d,
|
|
+});
|
|
+
|
|
+
|
|
+/* -- Device structures. ---------------------------------------------------- */
|
|
+
|
|
+struct spwr_psy_properties {
|
|
+ const char *name;
|
|
+ struct ssam_event_registry registry;
|
|
+};
|
|
+
|
|
+struct spwr_ac_device {
|
|
+ struct ssam_device *sdev;
|
|
+
|
|
+ char name[32];
|
|
+ struct power_supply *psy;
|
|
+ struct power_supply_desc psy_desc;
|
|
+
|
|
+ struct ssam_event_notifier notif;
|
|
+
|
|
+ struct mutex lock; /* Guards access to state below. */
|
|
+
|
|
+ __le32 state;
|
|
+};
|
|
+
|
|
+
|
|
+/* -- State management. ----------------------------------------------------- */
|
|
+
|
|
+static int spwr_ac_update_unlocked(struct spwr_ac_device *ac)
|
|
+{
|
|
+ __le32 old = ac->state;
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&ac->lock);
|
|
+
|
|
+ status = ssam_retry(ssam_bat_get_psrc, ac->sdev, &ac->state);
|
|
+ if (status < 0)
|
|
+ return status;
|
|
+
|
|
+ return old != ac->state;
|
|
+}
|
|
+
|
|
+static int spwr_ac_update(struct spwr_ac_device *ac)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&ac->lock);
|
|
+ status = spwr_ac_update_unlocked(ac);
|
|
+ mutex_unlock(&ac->lock);
|
|
+
|
|
+ return status;
|
|
+}
|
|
+
|
|
+static int spwr_ac_recheck(struct spwr_ac_device *ac)
|
|
+{
|
|
+ int status;
|
|
+
|
|
+ status = spwr_ac_update(ac);
|
|
+ if (status > 0)
|
|
+ power_supply_changed(ac->psy);
|
|
+
|
|
+ return status >= 0 ? 0 : status;
|
|
+}
|
|
+
|
|
+static u32 spwr_notify_ac(struct ssam_event_notifier *nf, const struct ssam_event *event)
|
|
+{
|
|
+ struct spwr_ac_device *ac;
|
|
+ int status;
|
|
+
|
|
+ ac = container_of(nf, struct spwr_ac_device, notif);
|
|
+
|
|
+ dev_dbg(&ac->sdev->dev, "power event (cid = %#04x, iid = %#04x, tid = %#04x)\n",
|
|
+ event->command_id, event->instance_id, event->target_id);
|
|
+
|
|
+ /*
|
|
+ * Allow events of all targets/instances here. Global adapter status
|
|
+ * seems to be handled via target=1 and instance=1, but events are
|
|
+ * reported on all targets/instances in use.
|
|
+ *
|
|
+ * While it should be enough to just listen on 1/1, listen everywhere to
|
|
+ * make sure we don't miss anything.
|
|
+ */
|
|
+
|
|
+ switch (event->command_id) {
|
|
+ case SAM_EVENT_CID_BAT_ADP:
|
|
+ status = spwr_ac_recheck(ac);
|
|
+ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED;
|
|
+
|
|
+ default:
|
|
+ return 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Properties. ----------------------------------------------------------- */
|
|
+
|
|
+static enum power_supply_property spwr_ac_props[] = {
|
|
+ POWER_SUPPLY_PROP_ONLINE,
|
|
+};
|
|
+
|
|
+static int spwr_ac_get_property(struct power_supply *psy, enum power_supply_property psp,
|
|
+ union power_supply_propval *val)
|
|
+{
|
|
+ struct spwr_ac_device *ac = power_supply_get_drvdata(psy);
|
|
+ int status;
|
|
+
|
|
+ mutex_lock(&ac->lock);
|
|
+
|
|
+ status = spwr_ac_update_unlocked(ac);
|
|
+ if (status)
|
|
+ goto out;
|
|
+
|
|
+ switch (psp) {
|
|
+ case POWER_SUPPLY_PROP_ONLINE:
|
|
+ val->intval = !!le32_to_cpu(ac->state);
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ status = -EINVAL;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ mutex_unlock(&ac->lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Device setup. --------------------------------------------------------- */
|
|
+
|
|
+static char *battery_supplied_to[] = {
|
|
+ "BAT1",
|
|
+ "BAT2",
|
|
+};
|
|
+
|
|
+static void spwr_ac_init(struct spwr_ac_device *ac, struct ssam_device *sdev,
|
|
+ struct ssam_event_registry registry, const char *name)
|
|
+{
|
|
+ mutex_init(&ac->lock);
|
|
+ strncpy(ac->name, name, ARRAY_SIZE(ac->name) - 1);
|
|
+
|
|
+ ac->sdev = sdev;
|
|
+
|
|
+ ac->notif.base.priority = 1;
|
|
+ ac->notif.base.fn = spwr_notify_ac;
|
|
+ ac->notif.event.reg = registry;
|
|
+ ac->notif.event.id.target_category = sdev->uid.category;
|
|
+ ac->notif.event.id.instance = 0;
|
|
+ ac->notif.event.mask = SSAM_EVENT_MASK_NONE;
|
|
+ ac->notif.event.flags = SSAM_EVENT_SEQUENCED;
|
|
+
|
|
+ ac->psy_desc.name = ac->name;
|
|
+ ac->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
|
|
+ ac->psy_desc.properties = spwr_ac_props;
|
|
+ ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props);
|
|
+ ac->psy_desc.get_property = spwr_ac_get_property;
|
|
+}
|
|
+
|
|
+static int spwr_ac_register(struct spwr_ac_device *ac)
|
|
+{
|
|
+ struct power_supply_config psy_cfg = {};
|
|
+ __le32 sta;
|
|
+ int status;
|
|
+
|
|
+ /* Make sure the device is there and functioning properly. */
|
|
+ status = ssam_retry(ssam_bat_get_sta, ac->sdev, &sta);
|
|
+ if (status)
|
|
+ return status;
|
|
+
|
|
+ if ((le32_to_cpu(sta) & SAM_BATTERY_STA_OK) != SAM_BATTERY_STA_OK)
|
|
+ return -ENODEV;
|
|
+
|
|
+ psy_cfg.drv_data = ac;
|
|
+ psy_cfg.supplied_to = battery_supplied_to;
|
|
+ psy_cfg.num_supplicants = ARRAY_SIZE(battery_supplied_to);
|
|
+
|
|
+ ac->psy = devm_power_supply_register(&ac->sdev->dev, &ac->psy_desc, &psy_cfg);
|
|
+ if (IS_ERR(ac->psy))
|
|
+ return PTR_ERR(ac->psy);
|
|
+
|
|
+ return ssam_notifier_register(ac->sdev->ctrl, &ac->notif);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Driver setup. --------------------------------------------------------- */
|
|
+
|
|
+static int __maybe_unused surface_ac_resume(struct device *dev)
|
|
+{
|
|
+ return spwr_ac_recheck(dev_get_drvdata(dev));
|
|
+}
|
|
+static SIMPLE_DEV_PM_OPS(surface_ac_pm_ops, NULL, surface_ac_resume);
|
|
+
|
|
+static int surface_ac_probe(struct ssam_device *sdev)
|
|
+{
|
|
+ const struct spwr_psy_properties *p;
|
|
+ struct spwr_ac_device *ac;
|
|
+
|
|
+ p = ssam_device_get_match_data(sdev);
|
|
+ if (!p)
|
|
+ return -ENODEV;
|
|
+
|
|
+ ac = devm_kzalloc(&sdev->dev, sizeof(*ac), GFP_KERNEL);
|
|
+ if (!ac)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ spwr_ac_init(ac, sdev, p->registry, p->name);
|
|
+ ssam_device_set_drvdata(sdev, ac);
|
|
+
|
|
+ return spwr_ac_register(ac);
|
|
+}
|
|
+
|
|
+static void surface_ac_remove(struct ssam_device *sdev)
|
|
+{
|
|
+ struct spwr_ac_device *ac = ssam_device_get_drvdata(sdev);
|
|
+
|
|
+ ssam_notifier_unregister(sdev->ctrl, &ac->notif);
|
|
+}
|
|
+
|
|
+static const struct spwr_psy_properties spwr_psy_props_adp1 = {
|
|
+ .name = "ADP1",
|
|
+ .registry = SSAM_EVENT_REGISTRY_SAM,
|
|
+};
|
|
+
|
|
+static const struct ssam_device_id surface_ac_match[] = {
|
|
+ { SSAM_SDEV(BAT, 0x01, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
|
|
+ { },
|
|
+};
|
|
+MODULE_DEVICE_TABLE(ssam, surface_ac_match);
|
|
+
|
|
+static struct ssam_device_driver surface_ac_driver = {
|
|
+ .probe = surface_ac_probe,
|
|
+ .remove = surface_ac_remove,
|
|
+ .match_table = surface_ac_match,
|
|
+ .driver = {
|
|
+ .name = "surface_ac",
|
|
+ .pm = &surface_ac_pm_ops,
|
|
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
|
|
+ },
|
|
+};
|
|
+module_ssam_device_driver(surface_ac_driver);
|
|
+
|
|
+MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
|
|
+MODULE_DESCRIPTION("AC driver for Surface System Aggregator Module");
|
|
+MODULE_LICENSE("GPL");
|
|
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
|
|
index 69f4527315e7..2c8dd2abbd04 100644
|
|
--- a/include/linux/mod_devicetable.h
|
|
+++ b/include/linux/mod_devicetable.h
|
|
@@ -770,15 +770,16 @@ struct typec_device_id {
|
|
|
|
/* Surface System Aggregator Module */
|
|
|
|
-#define SSAM_MATCH_CHANNEL 0x1
|
|
+#define SSAM_MATCH_TARGET 0x1
|
|
#define SSAM_MATCH_INSTANCE 0x2
|
|
#define SSAM_MATCH_FUNCTION 0x4
|
|
|
|
struct ssam_device_id {
|
|
__u8 match_flags;
|
|
|
|
+ __u8 domain;
|
|
__u8 category;
|
|
- __u8 channel;
|
|
+ __u8 target;
|
|
__u8 instance;
|
|
__u8 function;
|
|
|
|
diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
|
|
new file mode 100644
|
|
index 000000000000..8e3e86c7d78c
|
|
--- /dev/null
|
|
+++ b/include/linux/surface_acpi_notify.h
|
|
@@ -0,0 +1,39 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Interface for Surface ACPI Notify (SAN) driver.
|
|
+ *
|
|
+ * Provides access to discrete GPU notifications sent from ACPI via the SAN
|
|
+ * driver, which are not handled by this driver directly.
|
|
+ *
|
|
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
|
|
+#define _LINUX_SURFACE_ACPI_NOTIFY_H
|
|
+
|
|
+#include <linux/notifier.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+/**
|
|
+ * struct san_dgpu_event - Discrete GPU ACPI event.
|
|
+ * @category: Category of the event.
|
|
+ * @target: Target ID of the event source.
|
|
+ * @command: Command ID of the event.
|
|
+ * @instance: Instance ID of the event source.
|
|
+ * @length: Length of the event's payload data (in bytes).
|
|
+ * @payload: Pointer to the event's payload data.
|
|
+ */
|
|
+struct san_dgpu_event {
|
|
+ u8 category;
|
|
+ u8 target;
|
|
+ u8 command;
|
|
+ u8 instance;
|
|
+ u16 length;
|
|
+ u8 *payload;
|
|
+};
|
|
+
|
|
+int san_client_link(struct device *client);
|
|
+int san_dgpu_notifier_register(struct notifier_block *nb);
|
|
+int san_dgpu_notifier_unregister(struct notifier_block *nb);
|
|
+
|
|
+#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
|
|
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
|
|
new file mode 100644
|
|
index 000000000000..8cee730e01f3
|
|
--- /dev/null
|
|
+++ b/include/linux/surface_aggregator/controller.h
|
|
@@ -0,0 +1,849 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) controller interface.
|
|
+ *
|
|
+ * Main communication interface for the SSAM EC. Provides a controller
|
|
+ * managing access and communication to and from the SSAM EC, as well as main
|
|
+ * communication structures and definitions.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
|
|
+#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
|
|
+
|
|
+#include <linux/completion.h>
|
|
+#include <linux/device.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/serial_hub.h>
|
|
+
|
|
+
|
|
+/* -- Main data types and definitions --------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * enum ssam_event_flags - Flags for enabling/disabling SAM events
|
|
+ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
|
|
+ */
|
|
+enum ssam_event_flags {
|
|
+ SSAM_EVENT_SEQUENCED = BIT(0),
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_event - SAM event sent from the EC to the host.
|
|
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
|
|
+ * @target_id: Target ID of the event source.
|
|
+ * @command_id: Command ID of the event.
|
|
+ * @instance_id: Instance ID of the event source.
|
|
+ * @length: Length of the event payload in bytes.
|
|
+ * @data: Event payload data.
|
|
+ */
|
|
+struct ssam_event {
|
|
+ u8 target_category;
|
|
+ u8 target_id;
|
|
+ u8 command_id;
|
|
+ u8 instance_id;
|
|
+ u16 length;
|
|
+ u8 data[];
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum ssam_request_flags - Flags for SAM requests.
|
|
+ *
|
|
+ * @SSAM_REQUEST_HAS_RESPONSE:
|
|
+ * Specifies that the request expects a response. If not set, the request
|
|
+ * will be directly completed after its underlying packet has been
|
|
+ * transmitted. If set, the request transport system waits for a response
|
|
+ * of the request.
|
|
+ *
|
|
+ * @SSAM_REQUEST_UNSEQUENCED:
|
|
+ * Specifies that the request should be transmitted via an unsequenced
|
|
+ * packet. If set, the request must not have a response, meaning that this
|
|
+ * flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
|
|
+ */
|
|
+enum ssam_request_flags {
|
|
+ SSAM_REQUEST_HAS_RESPONSE = BIT(0),
|
|
+ SSAM_REQUEST_UNSEQUENCED = BIT(1),
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_request - SAM request description.
|
|
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
|
|
+ * @target_id: ID of the request's target.
|
|
+ * @command_id: Command ID of the request.
|
|
+ * @instance_id: Instance ID of the request's target.
|
|
+ * @flags: Flags for the request. See &enum ssam_request_flags.
|
|
+ * @length: Length of the request payload in bytes.
|
|
+ * @payload: Request payload data.
|
|
+ *
|
|
+ * This struct fully describes a SAM request with payload. It is intended to
|
|
+ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
|
|
+ * and specifically its raw message data via ssam_request_write_data().
|
|
+ */
|
|
+struct ssam_request {
|
|
+ u8 target_category;
|
|
+ u8 target_id;
|
|
+ u8 command_id;
|
|
+ u8 instance_id;
|
|
+ u16 flags;
|
|
+ u16 length;
|
|
+ const u8 *payload;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_response - Response buffer for SAM request.
|
|
+ * @capacity: Capacity of the buffer, in bytes.
|
|
+ * @length: Length of the actual data stored in the memory pointed to by
|
|
+ * @pointer, in bytes. Set by the transport system.
|
|
+ * @pointer: Pointer to the buffer's memory, storing the response payload data.
|
|
+ */
|
|
+struct ssam_response {
|
|
+ size_t capacity;
|
|
+ size_t length;
|
|
+ u8 *pointer;
|
|
+};
|
|
+
|
|
+struct ssam_controller;
|
|
+
|
|
+struct ssam_controller *ssam_get_controller(void);
|
|
+struct ssam_controller *ssam_client_bind(struct device *client);
|
|
+int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
|
|
+
|
|
+struct device *ssam_controller_device(struct ssam_controller *c);
|
|
+
|
|
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
|
|
+void ssam_controller_put(struct ssam_controller *c);
|
|
+
|
|
+void ssam_controller_statelock(struct ssam_controller *c);
|
|
+void ssam_controller_stateunlock(struct ssam_controller *c);
|
|
+
|
|
+ssize_t ssam_request_write_data(struct ssam_span *buf,
|
|
+ struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec);
|
|
+
|
|
+
|
|
+/* -- Synchronous request interface. ---------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * struct ssam_request_sync - Synchronous SAM request struct.
|
|
+ * @base: Underlying SSH request.
|
|
+ * @comp: Completion used to signal full completion of the request. After the
|
|
+ * request has been submitted, this struct may only be modified or
|
|
+ * deallocated after the completion has been signaled.
|
|
+ * request has been submitted,
|
|
+ * @resp: Buffer to store the response.
|
|
+ * @status: Status of the request, set after the base request has been
|
|
+ * completed or has failed.
|
|
+ */
|
|
+struct ssam_request_sync {
|
|
+ struct ssh_request base;
|
|
+ struct completion comp;
|
|
+ struct ssam_response *resp;
|
|
+ int status;
|
|
+};
|
|
+
|
|
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
|
|
+ struct ssam_request_sync **rqst,
|
|
+ struct ssam_span *buffer);
|
|
+
|
|
+void ssam_request_sync_free(struct ssam_request_sync *rqst);
|
|
+
|
|
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
|
|
+ enum ssam_request_flags flags);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_set_data - Set message data of a synchronous request.
|
|
+ * @rqst: The request.
|
|
+ * @ptr: Pointer to the request message data.
|
|
+ * @len: Length of the request message data.
|
|
+ *
|
|
+ * Set the request message data of a synchronous request. The provided buffer
|
|
+ * needs to live until the request has been completed.
|
|
+ */
|
|
+static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
|
|
+ u8 *ptr, size_t len)
|
|
+{
|
|
+ ssh_request_set_data(&rqst->base, ptr, len);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
|
|
+ * @rqst: The request.
|
|
+ * @resp: The response buffer.
|
|
+ *
|
|
+ * Sets the response buffer of a synchronous request. This buffer will store
|
|
+ * the response of the request after it has been completed. May be %NULL if no
|
|
+ * response is expected.
|
|
+ */
|
|
+static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
|
|
+ struct ssam_response *resp)
|
|
+{
|
|
+ rqst->resp = resp;
|
|
+}
|
|
+
|
|
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
|
|
+ struct ssam_request_sync *rqst);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_wait - Wait for completion of a synchronous request.
|
|
+ * @rqst: The request to wait for.
|
|
+ *
|
|
+ * Wait for completion and release of a synchronous request. After this
|
|
+ * function terminates, the request is guaranteed to have left the transport
|
|
+ * system. After successful submission of a request, this function must be
|
|
+ * called before accessing the response of the request, freeing the request,
|
|
+ * or freeing any of the buffers associated with the request.
|
|
+ *
|
|
+ * This function must not be called if the request has not been submitted yet
|
|
+ * and may lead to a deadlock/infinite wait if a subsequent request submission
|
|
+ * fails in that case, due to the completion never triggering.
|
|
+ *
|
|
+ * Return: Returns the status of the given request, which is set on completion
|
|
+ * of the packet. This value is zero on success and negative on failure.
|
|
+ */
|
|
+static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
|
|
+{
|
|
+ wait_for_completion(&rqst->comp);
|
|
+ return rqst->status;
|
|
+}
|
|
+
|
|
+int ssam_request_sync(struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec,
|
|
+ struct ssam_response *rsp);
|
|
+
|
|
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
|
|
+ const struct ssam_request *spec,
|
|
+ struct ssam_response *rsp,
|
|
+ struct ssam_span *buf);
|
|
+
|
|
+/**
|
|
+ * ssam_request_sync_onstack - Execute a synchronous request on the stack.
|
|
+ * @ctrl: The controller via which the request is submitted.
|
|
+ * @rqst: The request specification.
|
|
+ * @rsp: The response buffer.
|
|
+ * @payload_len: The (maximum) request payload length.
|
|
+ *
|
|
+ * Allocates a synchronous request with specified payload length on the stack,
|
|
+ * fully initializes it via the provided request specification, submits it,
|
|
+ * and finally waits for its completion before returning its status. This
|
|
+ * helper macro essentially allocates the request message buffer on the stack
|
|
+ * and then calls ssam_request_sync_with_buffer().
|
|
+ *
|
|
+ * Note: The @payload_len parameter specifies the maximum payload length, used
|
|
+ * for buffer allocation. The actual payload length may be smaller.
|
|
+ *
|
|
+ * Return: Returns the status of the request or any failure during setup, i.e.
|
|
+ * zero on success and a negative value on failure.
|
|
+ */
|
|
+#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
|
|
+ ({ \
|
|
+ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
|
|
+ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
|
|
+ \
|
|
+ ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
|
|
+ })
|
|
+
|
|
+/**
|
|
+ * __ssam_retry - Retry request in case of I/O errors or timeouts.
|
|
+ * @request: The request function to execute. Must return an integer.
|
|
+ * @n: Number of tries.
|
|
+ * @args: Arguments for the request function.
|
|
+ *
|
|
+ * Executes the given request function, i.e. calls @request. In case the
|
|
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
|
|
+ * or underlying packet timed out), @request will be re-executed again, up to
|
|
+ * @n times in total.
|
|
+ *
|
|
+ * Return: Returns the return value of the last execution of @request.
|
|
+ */
|
|
+#define __ssam_retry(request, n, args...) \
|
|
+ ({ \
|
|
+ int __i, __s = 0; \
|
|
+ \
|
|
+ for (__i = (n); __i > 0; __i--) { \
|
|
+ __s = request(args); \
|
|
+ if (__s != -ETIMEDOUT && __s != -EREMOTEIO) \
|
|
+ break; \
|
|
+ } \
|
|
+ __s; \
|
|
+ })
|
|
+
|
|
+/**
|
|
+ * ssam_retry - Retry request in case of I/O errors or timeouts up to three
|
|
+ * times in total.
|
|
+ * @request: The request function to execute. Must return an integer.
|
|
+ * @args: Arguments for the request function.
|
|
+ *
|
|
+ * Executes the given request function, i.e. calls @request. In case the
|
|
+ * request returns %-EREMOTEIO (indicates I/O error) or -%ETIMEDOUT (request
|
|
+ * or underlying packet timed out), @request will be re-executed again, up to
|
|
+ * three times in total.
|
|
+ *
|
|
+ * See __ssam_retry() for a more generic macro for this purpose.
|
|
+ *
|
|
+ * Return: Returns the return value of the last execution of @request.
|
|
+ */
|
|
+#define ssam_retry(request, args...) \
|
|
+ __ssam_retry(request, 3, args)
|
|
+
|
|
+/**
|
|
+ * struct ssam_request_spec - Blue-print specification of SAM request.
|
|
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
|
|
+ * @target_id: ID of the request's target.
|
|
+ * @command_id: Command ID of the request.
|
|
+ * @instance_id: Instance ID of the request's target.
|
|
+ * @flags: Flags for the request. See &enum ssam_request_flags.
|
|
+ *
|
|
+ * Blue-print specification for a SAM request. This struct describes the
|
|
+ * unique static parameters of a request (i.e. type) without specifying any of
|
|
+ * its instance-specific data (e.g. payload). It is intended to be used as base
|
|
+ * for defining simple request functions via the
|
|
+ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
|
|
+ */
|
|
+struct ssam_request_spec {
|
|
+ u8 target_category;
|
|
+ u8 target_id;
|
|
+ u8 command_id;
|
|
+ u8 instance_id;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
|
|
+ * request.
|
|
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
|
|
+ * @command_id: Command ID of the request.
|
|
+ * @flags: Flags for the request. See &enum ssam_request_flags.
|
|
+ *
|
|
+ * Blue-print specification for a multi-device SAM request, i.e. a request
|
|
+ * that is applicable to multiple device instances, described by their
|
|
+ * individual target and instance IDs. This struct describes the unique static
|
|
+ * parameters of a request (i.e. type) without specifying any of its
|
|
+ * instance-specific data (e.g. payload) and without specifying any of its
|
|
+ * device specific IDs (i.e. target and instance ID). It is intended to be
|
|
+ * used as base for defining simple multi-device request functions via the
|
|
+ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
|
|
+ * families of macros.
|
|
+ */
|
|
+struct ssam_request_spec_md {
|
|
+ u8 target_category;
|
|
+ u8 command_id;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
|
|
+ * with neither argument nor return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request having neither argument nor return value. The
|
|
+ * generated function takes care of setting up the request struct and buffer
|
|
+ * allocation, as well as execution of the request itself, returning once the
|
|
+ * request has been fully completed. The required transport buffer will be
|
|
+ * allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl)``, returning the status of the request, which is zero on success and
|
|
+ * negative on failure. The ``ctrl`` parameter is the controller via which the
|
|
+ * request is being sent.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
|
|
+ int name(struct ssam_controller *ctrl) \
|
|
+ { \
|
|
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = s.target_id; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = s.instance_id; \
|
|
+ rqst.flags = s.flags; \
|
|
+ rqst.length = 0; \
|
|
+ rqst.payload = NULL; \
|
|
+ \
|
|
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
|
|
+ * argument.
|
|
+ * @name: Name of the generated function.
|
|
+ * @atype: Type of the request's argument.
|
|
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking an argument of type @atype and having no
|
|
+ * return value. The generated function takes care of setting up the request
|
|
+ * struct, buffer allocation, as well as execution of the request itself,
|
|
+ * returning once the request has been fully completed. The required transport
|
|
+ * buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl, const atype *arg)``, returning the status of the request, which is
|
|
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
|
|
+ * controller via which the request is sent. The request argument is specified
|
|
+ * via the ``arg`` pointer.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \
|
|
+ int name(struct ssam_controller *ctrl, const atype *arg) \
|
|
+ { \
|
|
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = s.target_id; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = s.instance_id; \
|
|
+ rqst.flags = s.flags; \
|
|
+ rqst.length = sizeof(atype); \
|
|
+ rqst.payload = (u8 *)arg; \
|
|
+ \
|
|
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
|
|
+ sizeof(atype)); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
|
|
+ * return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @rtype: Type of the request's return value.
|
|
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking no argument but having a return value of
|
|
+ * type @rtype. The generated function takes care of setting up the request
|
|
+ * and response structs, buffer allocation, as well as execution of the
|
|
+ * request itself, returning once the request has been fully completed. The
|
|
+ * required transport buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl, rtype *ret)``, returning the status of the request, which is zero on
|
|
+ * success and negative on failure. The ``ctrl`` parameter is the controller
|
|
+ * via which the request is sent. The request's return value is written to the
|
|
+ * memory pointed to by the ``ret`` parameter.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
|
|
+ int name(struct ssam_controller *ctrl, rtype *ret) \
|
|
+ { \
|
|
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ struct ssam_response rsp; \
|
|
+ int status; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = s.target_id; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = s.instance_id; \
|
|
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
|
|
+ rqst.length = 0; \
|
|
+ rqst.payload = NULL; \
|
|
+ \
|
|
+ rsp.capacity = sizeof(rtype); \
|
|
+ rsp.length = 0; \
|
|
+ rsp.pointer = (u8 *)ret; \
|
|
+ \
|
|
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
|
|
+ if (status) \
|
|
+ return status; \
|
|
+ \
|
|
+ if (rsp.length != sizeof(rtype)) { \
|
|
+ struct device *dev = ssam_controller_device(ctrl); \
|
|
+ dev_err(dev, \
|
|
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
|
|
+ sizeof(rtype), rsp.length, rqst.target_category,\
|
|
+ rqst.command_id); \
|
|
+ return -EIO; \
|
|
+ } \
|
|
+ \
|
|
+ return 0; \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
|
|
+ * request function with neither argument nor return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request having neither argument nor return value. Device
|
|
+ * specifying parameters are not hard-coded, but instead must be provided to
|
|
+ * the function. The generated function takes care of setting up the request
|
|
+ * struct, buffer allocation, as well as execution of the request itself,
|
|
+ * returning once the request has been fully completed. The required transport
|
|
+ * buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is
|
|
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
|
|
+ * controller via which the request is sent, ``tid`` the target ID for the
|
|
+ * request, and ``iid`` the instance ID.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \
|
|
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \
|
|
+ { \
|
|
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = tid; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = iid; \
|
|
+ rqst.flags = s.flags; \
|
|
+ rqst.length = 0; \
|
|
+ rqst.payload = NULL; \
|
|
+ \
|
|
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
|
|
+ * request function with argument.
|
|
+ * @name: Name of the generated function.
|
|
+ * @atype: Type of the request's argument.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking an argument of type @atype and having no
|
|
+ * return value. Device specifying parameters are not hard-coded, but instead
|
|
+ * must be provided to the function. The generated function takes care of
|
|
+ * setting up the request struct, buffer allocation, as well as execution of
|
|
+ * the request itself, returning once the request has been fully completed.
|
|
+ * The required transport buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the
|
|
+ * request, which is zero on success and negative on failure. The ``ctrl``
|
|
+ * parameter is the controller via which the request is sent, ``tid`` the
|
|
+ * target ID for the request, and ``iid`` the instance ID. The request argument
|
|
+ * is specified via the ``arg`` pointer.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \
|
|
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\
|
|
+ { \
|
|
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = tid; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = iid; \
|
|
+ rqst.flags = s.flags; \
|
|
+ rqst.length = sizeof(atype); \
|
|
+ rqst.payload = (u8 *)arg; \
|
|
+ \
|
|
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
|
|
+ sizeof(atype)); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
|
|
+ * request function with return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @rtype: Type of the request's return value.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking no argument but having a return value of
|
|
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
|
|
+ * must be provided to the function. The generated function takes care of
|
|
+ * setting up the request and response structs, buffer allocation, as well as
|
|
+ * execution of the request itself, returning once the request has been fully
|
|
+ * completed. The required transport buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_controller
|
|
+ * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request,
|
|
+ * which is zero on success and negative on failure. The ``ctrl`` parameter is
|
|
+ * the controller via which the request is sent, ``tid`` the target ID for the
|
|
+ * request, and ``iid`` the instance ID. The request's return value is written
|
|
+ * to the memory pointed to by the ``ret`` parameter.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
|
|
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \
|
|
+ { \
|
|
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
|
|
+ struct ssam_request rqst; \
|
|
+ struct ssam_response rsp; \
|
|
+ int status; \
|
|
+ \
|
|
+ rqst.target_category = s.target_category; \
|
|
+ rqst.target_id = tid; \
|
|
+ rqst.command_id = s.command_id; \
|
|
+ rqst.instance_id = iid; \
|
|
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
|
|
+ rqst.length = 0; \
|
|
+ rqst.payload = NULL; \
|
|
+ \
|
|
+ rsp.capacity = sizeof(rtype); \
|
|
+ rsp.length = 0; \
|
|
+ rsp.pointer = (u8 *)ret; \
|
|
+ \
|
|
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
|
|
+ if (status) \
|
|
+ return status; \
|
|
+ \
|
|
+ if (rsp.length != sizeof(rtype)) { \
|
|
+ struct device *dev = ssam_controller_device(ctrl); \
|
|
+ dev_err(dev, \
|
|
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
|
|
+ sizeof(rtype), rsp.length, rqst.target_category,\
|
|
+ rqst.command_id); \
|
|
+ return -EIO; \
|
|
+ } \
|
|
+ \
|
|
+ return 0; \
|
|
+ }
|
|
+
|
|
+
|
|
+/* -- Event notifier/callbacks. --------------------------------------------- */
|
|
+
|
|
+#define SSAM_NOTIF_STATE_SHIFT 2
|
|
+#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
|
|
+
|
|
+/**
|
|
+ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
|
|
+ * callback functions.
|
|
+ *
|
|
+ * @SSAM_NOTIF_HANDLED:
|
|
+ * Indicates that the notification has been handled. This flag should be
|
|
+ * set by the handler if the handler can act/has acted upon the event
|
|
+ * provided to it. This flag should not be set if the handler is not a
|
|
+ * primary handler intended for the provided event.
|
|
+ *
|
|
+ * If this flag has not been set by any handler after the notifier chain
|
|
+ * has been traversed, a warning will be emitted, stating that the event
|
|
+ * has not been handled.
|
|
+ *
|
|
+ * @SSAM_NOTIF_STOP:
|
|
+ * Indicates that the notifier traversal should stop. If this flag is
|
|
+ * returned from a notifier callback, notifier chain traversal will
|
|
+ * immediately stop and any remaining notifiers will not be called. This
|
|
+ * flag is automatically set when ssam_notifier_from_errno() is called
|
|
+ * with a negative error value.
|
|
+ */
|
|
+enum ssam_notif_flags {
|
|
+ SSAM_NOTIF_HANDLED = BIT(0),
|
|
+ SSAM_NOTIF_STOP = BIT(1),
|
|
+};
|
|
+
|
|
+struct ssam_event_notifier;
|
|
+
|
|
+typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
|
|
+ const struct ssam_event *event);
|
|
+
|
|
+/**
|
|
+ * struct ssam_notifier_block - Base notifier block for SSAM event
|
|
+ * notifications.
|
|
+ * @node: The node for the list of notifiers.
|
|
+ * @fn: The callback function of this notifier. This function takes the
|
|
+ * respective notifier block and event as input and should return
|
|
+ * a notifier value, which can either be obtained from the flags
|
|
+ * provided in &enum ssam_notif_flags, converted from a standard
|
|
+ * error value via ssam_notifier_from_errno(), or a combination of
|
|
+ * both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
|
|
+ * @priority: Priority value determining the order in which notifier callbacks
|
|
+ * will be called. A higher value means higher priority, i.e. the
|
|
+ * associated callback will be executed earlier than other (lower
|
|
+ * priority) callbacks.
|
|
+ */
|
|
+struct ssam_notifier_block {
|
|
+ struct list_head node;
|
|
+ ssam_notifier_fn_t fn;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_from_errno() - Convert standard error value to notifier
|
|
+ * return code.
|
|
+ * @err: The error code to convert, must be negative (in case of failure) or
|
|
+ * zero (in case of success).
|
|
+ *
|
|
+ * Return: Returns the notifier return value obtained by converting the
|
|
+ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
|
|
+ * will be set, causing notifier call chain traversal to abort.
|
|
+ */
|
|
+static inline u32 ssam_notifier_from_errno(int err)
|
|
+{
|
|
+ if (WARN_ON(err > 0) || err == 0)
|
|
+ return 0;
|
|
+ else
|
|
+ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_notifier_to_errno() - Convert notifier return code to standard error
|
|
+ * value.
|
|
+ * @ret: The notifier return value to convert.
|
|
+ *
|
|
+ * Return: Returns the negative error value encoded in @ret or zero if @ret
|
|
+ * indicates success.
|
|
+ */
|
|
+static inline int ssam_notifier_to_errno(u32 ret)
|
|
+{
|
|
+ return -(ret >> SSAM_NOTIF_STATE_SHIFT);
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Event/notification registry. ------------------------------------------ */
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_registry - Registry specification used for enabling events.
|
|
+ * @target_category: Target category for the event registry requests.
|
|
+ * @target_id: Target ID for the event registry requests.
|
|
+ * @cid_enable: Command ID for the event-enable request.
|
|
+ * @cid_disable: Command ID for the event-disable request.
|
|
+ *
|
|
+ * This struct describes a SAM event registry via the minimal collection of
|
|
+ * SAM IDs specifying the requests to use for enabling and disabling an event.
|
|
+ * The individual event to be enabled/disabled itself is specified via &struct
|
|
+ * ssam_event_id.
|
|
+ */
|
|
+struct ssam_event_registry {
|
|
+ u8 target_category;
|
|
+ u8 target_id;
|
|
+ u8 cid_enable;
|
|
+ u8 cid_disable;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_id - Unique event ID used for enabling events.
|
|
+ * @target_category: Target category of the event source.
|
|
+ * @instance: Instance ID of the event source.
|
|
+ *
|
|
+ * This struct specifies the event to be enabled/disabled via an externally
|
|
+ * provided registry. It does not specify the registry to be used itself, this
|
|
+ * is done via &struct ssam_event_registry.
|
|
+ */
|
|
+struct ssam_event_id {
|
|
+ u8 target_category;
|
|
+ u8 instance;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
|
|
+ *
|
|
+ * @SSAM_EVENT_MASK_NONE:
|
|
+ * Run the callback for any event with matching target category. Do not
|
|
+ * do any additional filtering.
|
|
+ *
|
|
+ * @SSAM_EVENT_MASK_TARGET:
|
|
+ * In addition to filtering by target category, only execute the notifier
|
|
+ * callback for events with a target ID matching to the one of the
|
|
+ * registry used for enabling/disabling the event.
|
|
+ *
|
|
+ * @SSAM_EVENT_MASK_INSTANCE:
|
|
+ * In addition to filtering by target category, only execute the notifier
|
|
+ * callback for events with an instance ID matching to the instance ID
|
|
+ * used when enabling the event.
|
|
+ *
|
|
+ * @SSAM_EVENT_MASK_STRICT:
|
|
+ * Do all the filtering above.
|
|
+ */
|
|
+enum ssam_event_mask {
|
|
+ SSAM_EVENT_MASK_TARGET = BIT(0),
|
|
+ SSAM_EVENT_MASK_INSTANCE = BIT(1),
|
|
+
|
|
+ SSAM_EVENT_MASK_NONE = 0,
|
|
+ SSAM_EVENT_MASK_STRICT =
|
|
+ SSAM_EVENT_MASK_TARGET
|
|
+ | SSAM_EVENT_MASK_INSTANCE,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * SSAM_EVENT_REGISTRY() - Define a new event registry.
|
|
+ * @tc: Target category for the event registry requests.
|
|
+ * @tid: Target ID for the event registry requests.
|
|
+ * @cid_en: Command ID for the event-enable request.
|
|
+ * @cid_dis: Command ID for the event-disable request.
|
|
+ *
|
|
+ * Return: Returns the &struct ssam_event_registry specified by the given
|
|
+ * parameters.
|
|
+ */
|
|
+#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis) \
|
|
+ ((struct ssam_event_registry) { \
|
|
+ .target_category = (tc), \
|
|
+ .target_id = (tid), \
|
|
+ .cid_enable = (cid_en), \
|
|
+ .cid_disable = (cid_dis), \
|
|
+ })
|
|
+
|
|
+#define SSAM_EVENT_REGISTRY_SAM \
|
|
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
|
|
+
|
|
+#define SSAM_EVENT_REGISTRY_KIP \
|
|
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
|
|
+
|
|
+#define SSAM_EVENT_REGISTRY_REG \
|
|
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
|
|
+
|
|
+/**
|
|
+ * enum ssam_event_notifier_flags - Flags for event notifiers.
|
|
+ * @SSAM_EVENT_NOTIFIER_OBSERVER:
|
|
+ * The corresponding notifier acts as observer. Registering a notifier
|
|
+ * with this flag set will not attempt to enable any event. Equally,
|
|
+ * unregistering will not attempt to disable any event. Note that a
|
|
+ * notifier with this flag may not even correspond to a certain event at
|
|
+ * all, only to a specific event target category. Event matching will not
|
|
+ * be influenced by this flag.
|
|
+ */
|
|
+enum ssam_event_notifier_flags {
|
|
+ SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0),
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_event_notifier - Notifier block for SSAM events.
|
|
+ * @base: The base notifier block with callback function and priority.
|
|
+ * @event: The event for which this block will receive notifications.
|
|
+ * @event.reg: Registry via which the event will be enabled/disabled.
|
|
+ * @event.id: ID specifying the event.
|
|
+ * @event.mask: Flags determining how events are matched to the notifier.
|
|
+ * @event.flags: Flags used for enabling the event.
|
|
+ * @flags: Notifier flags (see &enum ssam_event_notifier_flags).
|
|
+ */
|
|
+struct ssam_event_notifier {
|
|
+ struct ssam_notifier_block base;
|
|
+
|
|
+ struct {
|
|
+ struct ssam_event_registry reg;
|
|
+ struct ssam_event_id id;
|
|
+ enum ssam_event_mask mask;
|
|
+ u8 flags;
|
|
+ } event;
|
|
+
|
|
+ unsigned long flags;
|
|
+};
|
|
+
|
|
+int ssam_notifier_register(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_notifier *n);
|
|
+
|
|
+int ssam_notifier_unregister(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_notifier *n);
|
|
+
|
|
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags);
|
|
+
|
|
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
|
|
+ struct ssam_event_registry reg,
|
|
+ struct ssam_event_id id, u8 flags);
|
|
+
|
|
+#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
|
|
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
|
|
new file mode 100644
|
|
index 000000000000..c092211a154d
|
|
--- /dev/null
|
|
+++ b/include/linux/surface_aggregator/device.h
|
|
@@ -0,0 +1,423 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
|
|
+ *
|
|
+ * Main interface for the surface-aggregator bus, surface-aggregator client
|
|
+ * devices, and respective drivers building on top of the SSAM controller.
|
|
+ * Provides support for non-platform/non-ACPI SSAM clients via dedicated
|
|
+ * subsystem.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
|
|
+#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
|
|
+
|
|
+#include <linux/device.h>
|
|
+#include <linux/mod_devicetable.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+#include <linux/surface_aggregator/controller.h>
|
|
+
|
|
+
|
|
+/* -- Surface System Aggregator Module bus. --------------------------------- */
|
|
+
|
|
+/**
|
|
+ * enum ssam_device_domain - SAM device domain.
|
|
+ * @SSAM_DOMAIN_VIRTUAL: Virtual device.
|
|
+ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
|
|
+ */
|
|
+enum ssam_device_domain {
|
|
+ SSAM_DOMAIN_VIRTUAL = 0x00,
|
|
+ SSAM_DOMAIN_SERIALHUB = 0x01,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
|
|
+ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
|
|
+ */
|
|
+enum ssam_virtual_tc {
|
|
+ SSAM_VIRTUAL_TC_HUB = 0x00,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_device_uid - Unique identifier for SSAM device.
|
|
+ * @domain: Domain of the device.
|
|
+ * @category: Target category of the device.
|
|
+ * @target: Target ID of the device.
|
|
+ * @instance: Instance ID of the device.
|
|
+ * @function: Sub-function of the device. This field can be used to split a
|
|
+ * single SAM device into multiple virtual subdevices to separate
|
|
+ * different functionality of that device and allow one driver per
|
|
+ * such functionality.
|
|
+ */
|
|
+struct ssam_device_uid {
|
|
+ u8 domain;
|
|
+ u8 category;
|
|
+ u8 target;
|
|
+ u8 instance;
|
|
+ u8 function;
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Special values for device matching.
|
|
+ *
|
|
+ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
|
|
+ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
|
|
+ * match_flags member of the device ID structure. Do not use them directly
|
|
+ * with struct ssam_device_id or struct ssam_device_uid.
|
|
+ */
|
|
+#define SSAM_ANY_TID 0xffff
|
|
+#define SSAM_ANY_IID 0xffff
|
|
+#define SSAM_ANY_FUN 0xffff
|
|
+
|
|
+/**
|
|
+ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
|
|
+ * parameters.
|
|
+ * @d: Domain of the device.
|
|
+ * @cat: Target category of the device.
|
|
+ * @tid: Target ID of the device.
|
|
+ * @iid: Instance ID of the device.
|
|
+ * @fun: Sub-function of the device.
|
|
+ *
|
|
+ * Initializes a &struct ssam_device_id with the given parameters. See &struct
|
|
+ * ssam_device_uid for details regarding the parameters. The special values
|
|
+ * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
|
|
+ * matching should ignore target ID, instance ID, and/or sub-function,
|
|
+ * respectively. This macro initializes the ``match_flags`` field based on the
|
|
+ * given parameters.
|
|
+ *
|
|
+ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
|
|
+ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
|
|
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
|
|
+ * allowed.
|
|
+ */
|
|
+#define SSAM_DEVICE(d, cat, tid, iid, fun) \
|
|
+ .match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0) \
|
|
+ | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0) \
|
|
+ | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
|
|
+ .domain = d, \
|
|
+ .category = cat, \
|
|
+ .target = __builtin_choose_expr((tid) != SSAM_ANY_TID, (tid), 0), \
|
|
+ .instance = __builtin_choose_expr((iid) != SSAM_ANY_IID, (iid), 0), \
|
|
+ .function = __builtin_choose_expr((fun) != SSAM_ANY_FUN, (fun), 0)
|
|
+
|
|
+/**
|
|
+ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
|
|
+ * the given parameters.
|
|
+ * @cat: Target category of the device.
|
|
+ * @tid: Target ID of the device.
|
|
+ * @iid: Instance ID of the device.
|
|
+ * @fun: Sub-function of the device.
|
|
+ *
|
|
+ * Initializes a &struct ssam_device_id with the given parameters in the
|
|
+ * virtual domain. See &struct ssam_device_uid for details regarding the
|
|
+ * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
|
|
+ * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
|
|
+ * instance ID, and/or sub-function, respectively. This macro initializes the
|
|
+ * ``match_flags`` field based on the given parameters.
|
|
+ *
|
|
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
|
|
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
|
|
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
|
|
+ * allowed.
|
|
+ */
|
|
+#define SSAM_VDEV(cat, tid, iid, fun) \
|
|
+ SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
|
|
+
|
|
+/**
|
|
+ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
|
|
+ * with the given parameters.
|
|
+ * @cat: Target category of the device.
|
|
+ * @tid: Target ID of the device.
|
|
+ * @iid: Instance ID of the device.
|
|
+ * @fun: Sub-function of the device.
|
|
+ *
|
|
+ * Initializes a &struct ssam_device_id with the given parameters in the SSH
|
|
+ * domain. See &struct ssam_device_uid for details regarding the parameters.
|
|
+ * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
|
|
+ * used to specify that matching should ignore target ID, instance ID, and/or
|
|
+ * sub-function, respectively. This macro initializes the ``match_flags``
|
|
+ * field based on the given parameters.
|
|
+ *
|
|
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
|
|
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
|
|
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
|
|
+ * allowed.
|
|
+ */
|
|
+#define SSAM_SDEV(cat, tid, iid, fun) \
|
|
+ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
|
|
+
|
|
+/**
|
|
+ * struct ssam_device - SSAM client device.
|
|
+ * @dev: Driver model representation of the device.
|
|
+ * @ctrl: SSAM controller managing this device.
|
|
+ * @uid: UID identifying the device.
|
|
+ */
|
|
+struct ssam_device {
|
|
+ struct device dev;
|
|
+ struct ssam_controller *ctrl;
|
|
+
|
|
+ struct ssam_device_uid uid;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_device_driver - SSAM client device driver.
|
|
+ * @driver: Base driver model structure.
|
|
+ * @match_table: Match table specifying which devices the driver should bind to.
|
|
+ * @probe: Called when the driver is being bound to a device.
|
|
+ * @remove: Called when the driver is being unbound from the device.
|
|
+ */
|
|
+struct ssam_device_driver {
|
|
+ struct device_driver driver;
|
|
+
|
|
+ const struct ssam_device_id *match_table;
|
|
+
|
|
+ int (*probe)(struct ssam_device *sdev);
|
|
+ void (*remove)(struct ssam_device *sdev);
|
|
+};
|
|
+
|
|
+extern struct bus_type ssam_bus_type;
|
|
+extern const struct device_type ssam_device_type;
|
|
+
|
|
+/**
|
|
+ * is_ssam_device() - Check if the given device is a SSAM client device.
|
|
+ * @d: The device to test the type of.
|
|
+ *
|
|
+ * Return: Returns %true if the specified device is of type &struct
|
|
+ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
|
|
+ * otherwise.
|
|
+ */
|
|
+static inline bool is_ssam_device(struct device *d)
|
|
+{
|
|
+ return d->type == &ssam_device_type;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * to_ssam_device() - Casts the given device to a SSAM client device.
|
|
+ * @d: The device to cast.
|
|
+ *
|
|
+ * Casts the given &struct device to a &struct ssam_device. The caller has to
|
|
+ * ensure that the given device is actually enclosed in a &struct ssam_device,
|
|
+ * e.g. by calling is_ssam_device().
|
|
+ *
|
|
+ * Return: Returns a pointer to the &struct ssam_device wrapping the given
|
|
+ * device @d.
|
|
+ */
|
|
+static inline struct ssam_device *to_ssam_device(struct device *d)
|
|
+{
|
|
+ return container_of(d, struct ssam_device, dev);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
|
|
+ * device driver.
|
|
+ * @d: The driver to cast.
|
|
+ *
|
|
+ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
|
|
+ * caller has to ensure that the given driver is actually enclosed in a
|
|
+ * &struct ssam_device_driver.
|
|
+ *
|
|
+ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
|
|
+ * given device driver @d.
|
|
+ */
|
|
+static inline
|
|
+struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
|
|
+{
|
|
+ return container_of(d, struct ssam_device_driver, driver);
|
|
+}
|
|
+
|
|
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
|
|
+ const struct ssam_device_uid uid);
|
|
+
|
|
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev);
|
|
+
|
|
+const void *ssam_device_get_match_data(const struct ssam_device *dev);
|
|
+
|
|
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
|
|
+ struct ssam_device_uid uid);
|
|
+
|
|
+int ssam_device_add(struct ssam_device *sdev);
|
|
+void ssam_device_remove(struct ssam_device *sdev);
|
|
+
|
|
+/**
|
|
+ * ssam_device_get() - Increment reference count of SSAM client device.
|
|
+ * @sdev: The device to increment the reference count of.
|
|
+ *
|
|
+ * Increments the reference count of the given SSAM client device by
|
|
+ * incrementing the reference count of the enclosed &struct device via
|
|
+ * get_device().
|
|
+ *
|
|
+ * See ssam_device_put() for the counter-part of this function.
|
|
+ *
|
|
+ * Return: Returns the device provided as input.
|
|
+ */
|
|
+static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
|
|
+{
|
|
+ return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_device_put() - Decrement reference count of SSAM client device.
|
|
+ * @sdev: The device to decrement the reference count of.
|
|
+ *
|
|
+ * Decrements the reference count of the given SSAM client device by
|
|
+ * decrementing the reference count of the enclosed &struct device via
|
|
+ * put_device().
|
|
+ *
|
|
+ * See ssam_device_get() for the counter-part of this function.
|
|
+ */
|
|
+static inline void ssam_device_put(struct ssam_device *sdev)
|
|
+{
|
|
+ if (sdev)
|
|
+ put_device(&sdev->dev);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
|
|
+ * @sdev: The device to get the driver-data from.
|
|
+ *
|
|
+ * Return: Returns the driver-data of the given device, previously set via
|
|
+ * ssam_device_set_drvdata().
|
|
+ */
|
|
+static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
|
|
+{
|
|
+ return dev_get_drvdata(&sdev->dev);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
|
|
+ * @sdev: The device to set the driver-data of.
|
|
+ * @data: The data to set the device's driver-data pointer to.
|
|
+ */
|
|
+static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
|
|
+{
|
|
+ dev_set_drvdata(&sdev->dev, data);
|
|
+}
|
|
+
|
|
+int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
|
|
+void ssam_device_driver_unregister(struct ssam_device_driver *d);
|
|
+
|
|
+/**
|
|
+ * ssam_device_driver_register() - Register a SSAM client device driver.
|
|
+ * @drv: The driver to register.
|
|
+ */
|
|
+#define ssam_device_driver_register(drv) \
|
|
+ __ssam_device_driver_register(drv, THIS_MODULE)
|
|
+
|
|
+/**
|
|
+ * module_ssam_device_driver() - Helper macro for SSAM device driver
|
|
+ * registration.
|
|
+ * @drv: The driver managed by this module.
|
|
+ *
|
|
+ * Helper macro to register a SSAM device driver via module_init() and
|
|
+ * module_exit(). This macro may only be used once per module and replaces the
|
|
+ * aforementioned definitions.
|
|
+ */
|
|
+#define module_ssam_device_driver(drv) \
|
|
+ module_driver(drv, ssam_device_driver_register, \
|
|
+ ssam_device_driver_unregister)
|
|
+
|
|
+
|
|
+/* -- Helpers for client-device requests. ----------------------------------- */
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
|
|
+ * request function with neither argument nor return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request having neither argument nor return value. Device
|
|
+ * specifying parameters are not hard-coded, but instead are provided via the
|
|
+ * client device, specifically its UID, supplied when calling this function.
|
|
+ * The generated function takes care of setting up the request struct, buffer
|
|
+ * allocation, as well as execution of the request itself, returning once the
|
|
+ * request has been fully completed. The required transport buffer will be
|
|
+ * allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_device *sdev)``,
|
|
+ * returning the status of the request, which is zero on success and negative
|
|
+ * on failure. The ``sdev`` parameter specifies both the target device of the
|
|
+ * request and by association the controller via which the request is sent.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \
|
|
+ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \
|
|
+ int name(struct ssam_device *sdev) \
|
|
+ { \
|
|
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
|
|
+ sdev->uid.instance); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
|
|
+ * request function with argument.
|
|
+ * @name: Name of the generated function.
|
|
+ * @atype: Type of the request's argument.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking an argument of type @atype and having no
|
|
+ * return value. Device specifying parameters are not hard-coded, but instead
|
|
+ * are provided via the client device, specifically its UID, supplied when
|
|
+ * calling this function. The generated function takes care of setting up the
|
|
+ * request struct, buffer allocation, as well as execution of the request
|
|
+ * itself, returning once the request has been fully completed. The required
|
|
+ * transport buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_device *sdev,
|
|
+ * const atype *arg)``, returning the status of the request, which is zero on
|
|
+ * success and negative on failure. The ``sdev`` parameter specifies both the
|
|
+ * target device of the request and by association the controller via which
|
|
+ * the request is sent. The request's argument is specified via the ``arg``
|
|
+ * pointer.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \
|
|
+ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \
|
|
+ int name(struct ssam_device *sdev, const atype *arg) \
|
|
+ { \
|
|
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
|
|
+ sdev->uid.instance, arg); \
|
|
+ }
|
|
+
|
|
+/**
|
|
+ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
|
|
+ * request function with return value.
|
|
+ * @name: Name of the generated function.
|
|
+ * @rtype: Type of the request's return value.
|
|
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
|
|
+ *
|
|
+ * Defines a function executing the synchronous SAM request specified by
|
|
+ * @spec, with the request taking no argument but having a return value of
|
|
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
|
|
+ * are provided via the client device, specifically its UID, supplied when
|
|
+ * calling this function. The generated function takes care of setting up the
|
|
+ * request struct, buffer allocation, as well as execution of the request
|
|
+ * itself, returning once the request has been fully completed. The required
|
|
+ * transport buffer will be allocated on the stack.
|
|
+ *
|
|
+ * The generated function is defined as ``int name(struct ssam_device *sdev,
|
|
+ * rtype *ret)``, returning the status of the request, which is zero on
|
|
+ * success and negative on failure. The ``sdev`` parameter specifies both the
|
|
+ * target device of the request and by association the controller via which
|
|
+ * the request is sent. The request's return value is written to the memory
|
|
+ * pointed to by the ``ret`` parameter.
|
|
+ *
|
|
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
|
|
+ * the generated function.
|
|
+ */
|
|
+#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \
|
|
+ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \
|
|
+ int name(struct ssam_device *sdev, rtype *ret) \
|
|
+ { \
|
|
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
|
|
+ sdev->uid.instance, ret); \
|
|
+ }
|
|
+
|
|
+#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
|
|
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
|
|
new file mode 100644
|
|
index 000000000000..f02d89168533
|
|
--- /dev/null
|
|
+++ b/include/linux/surface_aggregator/serial_hub.h
|
|
@@ -0,0 +1,668 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ */
|
|
+/*
|
|
+ * Surface Serial Hub (SSH) protocol and communication interface.
|
|
+ *
|
|
+ * Lower-level communication layers and SSH protocol definitions for the
|
|
+ * Surface System Aggregator Module (SSAM). Provides the interface for basic
|
|
+ * packet- and request-based communication with the SSAM EC via SSH.
|
|
+ *
|
|
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
|
|
+#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
|
|
+
|
|
+#include <linux/crc-ccitt.h>
|
|
+#include <linux/kref.h>
|
|
+#include <linux/ktime.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+
|
|
+/* -- Data structures for SAM-over-SSH communication. ----------------------- */
|
|
+
|
|
+/**
|
|
+ * enum ssh_frame_type - Frame types for SSH frames.
|
|
+ *
|
|
+ * @SSH_FRAME_TYPE_DATA_SEQ:
|
|
+ * Indicates a data frame, followed by a payload with the length specified
|
|
+ * in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
|
|
+ * that an ACK is required.
|
|
+ *
|
|
+ * @SSH_FRAME_TYPE_DATA_NSQ:
|
|
+ * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
|
|
+ * message does not have to be ACKed.
|
|
+ *
|
|
+ * @SSH_FRAME_TYPE_ACK:
|
|
+ * Indicates an ACK message.
|
|
+ *
|
|
+ * @SSH_FRAME_TYPE_NAK:
|
|
+ * Indicates an error response for previously sent frame. In general, this
|
|
+ * means that the frame and/or payload is malformed, e.g. a CRC is wrong.
|
|
+ * For command-type payloads, this can also mean that the command is
|
|
+ * invalid.
|
|
+ */
|
|
+enum ssh_frame_type {
|
|
+ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
|
|
+ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
|
|
+ SSH_FRAME_TYPE_ACK = 0x40,
|
|
+ SSH_FRAME_TYPE_NAK = 0x04,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_frame - SSH communication frame.
|
|
+ * @type: The type of the frame. See &enum ssh_frame_type.
|
|
+ * @len: The length of the frame payload directly following the CRC for this
|
|
+ * frame. Does not include the final CRC for that payload.
|
|
+ * @seq: The sequence number for this message/exchange.
|
|
+ */
|
|
+struct ssh_frame {
|
|
+ u8 type;
|
|
+ __le16 len;
|
|
+ u8 seq;
|
|
+} __packed;
|
|
+
|
|
+/*
|
|
+ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
|
|
+ *
|
|
+ * This is the physical maximum length of the protocol. Implementations may
|
|
+ * set a more constrained limit.
|
|
+ */
|
|
+#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
|
|
+
|
|
+/**
|
|
+ * enum ssh_payload_type - Type indicator for the SSH payload.
|
|
+ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
|
|
+ * payload.
|
|
+ */
|
|
+enum ssh_payload_type {
|
|
+ SSH_PLD_TYPE_CMD = 0x80,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_command - Payload of a command-type frame.
|
|
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
|
|
+ * SSH_PLD_TYPE_CMD for this struct.
|
|
+ * @tc: Command target category.
|
|
+ * @tid_out: Output target ID. Should be zero if this is an incoming (EC to host)
|
|
+ * message.
|
|
+ * @tid_in: Input target ID. Should be zero if this is an outgoing (host to
|
|
+ * EC) message.
|
|
+ * @iid: Instance ID.
|
|
+ * @rqid: Request ID. Used to match requests with responses and differentiate
|
|
+ * between responses and events.
|
|
+ * @cid: Command ID.
|
|
+ */
|
|
+struct ssh_command {
|
|
+ u8 type;
|
|
+ u8 tc;
|
|
+ u8 tid_out;
|
|
+ u8 tid_in;
|
|
+ u8 iid;
|
|
+ __le16 rqid;
|
|
+ u8 cid;
|
|
+} __packed;
|
|
+
|
|
+/*
|
|
+ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
|
|
+ *
|
|
+ * This is the physical maximum length of the protocol. Implementations may
|
|
+ * set a more constrained limit.
|
|
+ */
|
|
+#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
|
|
+ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
|
|
+
|
|
+/*
|
|
+ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
|
|
+ *
|
|
+ * This is the minimum number of bytes required to form a message. The actual
|
|
+ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
|
|
+ */
|
|
+#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
|
|
+
|
|
+/*
|
|
+ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
|
|
+ *
|
|
+ * This is the length of a SSH control message, which is equal to a SSH
|
|
+ * message without any payload.
|
|
+ */
|
|
+#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
|
|
+
|
|
+/**
|
|
+ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
|
|
+ * @payload_size: Length of the payload inside the SSH frame.
|
|
+ *
|
|
+ * Return: Returns the length of a SSH message with payload of specified size.
|
|
+ */
|
|
+#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size))
|
|
+
|
|
+/**
|
|
+ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
|
|
+ * @payload_size: Length of the command payload.
|
|
+ *
|
|
+ * Return: Returns the length of a SSH command message with command payload of
|
|
+ * specified size.
|
|
+ */
|
|
+#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
|
|
+ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size))
|
|
+
|
|
+/**
|
|
+ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
|
|
+ * frame.
|
|
+ * @field: The field for which the offset should be computed.
|
|
+ *
|
|
+ * Return: Returns the offset of the specified &struct ssh_frame field in the
|
|
+ * raw SSH message data. Takes SYN bytes (u16) preceding the frame into
|
|
+ * account.
|
|
+ */
|
|
+#define SSH_MSGOFFSET_FRAME(field) \
|
|
+ (sizeof(u16) + offsetof(struct ssh_frame, field))
|
|
+
|
|
+/**
|
|
+ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
|
|
+ * in command.
|
|
+ * @field: The field for which the offset should be computed.
|
|
+ *
|
|
+ * Return: Returns the offset of the specified &struct ssh_command field in
|
|
+ * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the
|
|
+ * frame CRC (u16) between frame and command into account.
|
|
+ */
|
|
+#define SSH_MSGOFFSET_COMMAND(field) \
|
|
+ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
|
|
+ + offsetof(struct ssh_command, field))
|
|
+
|
|
+/*
|
|
+ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
|
|
+ */
|
|
+#define SSH_MSG_SYN ((u16)0x55aa)
|
|
+
|
|
+/**
|
|
+ * ssh_crc() - Compute CRC for SSH messages.
|
|
+ * @buf: The pointer pointing to the data for which the CRC should be computed.
|
|
+ * @len: The length of the data for which the CRC should be computed.
|
|
+ *
|
|
+ * Return: Returns the CRC computed on the provided data, as used for SSH
|
|
+ * messages.
|
|
+ */
|
|
+static inline u16 ssh_crc(const u8 *buf, size_t len)
|
|
+{
|
|
+ return crc_ccitt_false(0xffff, buf, len);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * SSH_NUM_EVENTS - The number of reserved event IDs.
|
|
+ *
|
|
+ * The number of reserved event IDs, used for registering an SSH event
|
|
+ * handler. Valid event IDs are numbers below or equal to this value, with
|
|
+ * exception of zero, which is not an event ID. Thus, this is also the
|
|
+ * absolute maximum number of event handlers that can be registered.
|
|
+ */
|
|
+#define SSH_NUM_EVENTS 34
|
|
+
|
|
+/*
|
|
+ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
|
|
+ */
|
|
+#define SSH_NUM_TARGETS 2
|
|
+
|
|
+/**
|
|
+ * ssh_rqid_next_valid() - Return the next valid request ID.
|
|
+ * @rqid: The current request ID.
|
|
+ *
|
|
+ * Return: Returns the next valid request ID, following the current request ID
|
|
+ * provided to this function. This function skips any request IDs reserved for
|
|
+ * events.
|
|
+ */
|
|
+static inline u16 ssh_rqid_next_valid(u16 rqid)
|
|
+{
|
|
+ return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
|
|
+ * @rqid: The request ID to convert.
|
|
+ */
|
|
+static inline u16 ssh_rqid_to_event(u16 rqid)
|
|
+{
|
|
+ return rqid - 1u;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
|
|
+ * @rqid: The request ID to check.
|
|
+ */
|
|
+static inline bool ssh_rqid_is_event(u16 rqid)
|
|
+{
|
|
+ return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
|
|
+ * @tc: The target category to convert.
|
|
+ */
|
|
+static inline u16 ssh_tc_to_rqid(u8 tc)
|
|
+{
|
|
+ return tc;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
|
|
+ * @tid: The target ID to convert.
|
|
+ */
|
|
+static inline u8 ssh_tid_to_index(u8 tid)
|
|
+{
|
|
+ return tid - 1u;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_tid_is_valid() - Check if target ID is valid/supported.
|
|
+ * @tid: The target ID to check.
|
|
+ */
|
|
+static inline bool ssh_tid_is_valid(u8 tid)
|
|
+{
|
|
+ return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * struct ssam_span - Reference to a buffer region.
|
|
+ * @ptr: Pointer to the buffer region.
|
|
+ * @len: Length of the buffer region.
|
|
+ *
|
|
+ * A reference to a (non-owned) buffer segment, consisting of pointer and
|
|
+ * length. Use of this struct indicates non-owned data, i.e. data of which the
|
|
+ * life-time is managed (i.e. it is allocated/freed) via another pointer.
|
|
+ */
|
|
+struct ssam_span {
|
|
+ u8 *ptr;
|
|
+ size_t len;
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Known SSH/EC target categories.
|
|
+ *
|
|
+ * List of currently known target category values; "Known" as in we know they
|
|
+ * exist and are valid on at least some device/model. Detailed functionality
|
|
+ * or the full category name is only known for some of these categories and
|
|
+ * is detailed in the respective comment below.
|
|
+ *
|
|
+ * These values and abbreviations have been extracted from strings inside the
|
|
+ * Windows driver.
|
|
+ */
|
|
+enum ssam_ssh_tc {
|
|
+ /* Category 0x00 is invalid for EC use. */
|
|
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
|
|
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
|
|
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
|
|
+ SSAM_SSH_TC_PMC = 0x04,
|
|
+ SSAM_SSH_TC_FAN = 0x05,
|
|
+ SSAM_SSH_TC_PoM = 0x06,
|
|
+ SSAM_SSH_TC_DBG = 0x07,
|
|
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
|
|
+ SSAM_SSH_TC_FWU = 0x09,
|
|
+ SSAM_SSH_TC_UNI = 0x0a,
|
|
+ SSAM_SSH_TC_LPC = 0x0b,
|
|
+ SSAM_SSH_TC_TCL = 0x0c,
|
|
+ SSAM_SSH_TC_SFL = 0x0d,
|
|
+ SSAM_SSH_TC_KIP = 0x0e,
|
|
+ SSAM_SSH_TC_EXT = 0x0f,
|
|
+ SSAM_SSH_TC_BLD = 0x10,
|
|
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
|
|
+ SSAM_SSH_TC_SEN = 0x12,
|
|
+ SSAM_SSH_TC_SRQ = 0x13,
|
|
+ SSAM_SSH_TC_MCU = 0x14,
|
|
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
|
|
+ SSAM_SSH_TC_TCH = 0x16,
|
|
+ SSAM_SSH_TC_BKL = 0x17,
|
|
+ SSAM_SSH_TC_TAM = 0x18,
|
|
+ SSAM_SSH_TC_ACC = 0x19,
|
|
+ SSAM_SSH_TC_UFI = 0x1a,
|
|
+ SSAM_SSH_TC_USC = 0x1b,
|
|
+ SSAM_SSH_TC_PEN = 0x1c,
|
|
+ SSAM_SSH_TC_VID = 0x1d,
|
|
+ SSAM_SSH_TC_AUD = 0x1e,
|
|
+ SSAM_SSH_TC_SMC = 0x1f,
|
|
+ SSAM_SSH_TC_KPD = 0x20,
|
|
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
|
|
+};
|
|
+
|
|
+
|
|
+/* -- Packet transport layer (ptl). ----------------------------------------- */
|
|
+
|
|
+/**
|
|
+ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
|
|
+ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
|
|
+ * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets.
|
|
+ * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets.
|
|
+ * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets.
|
|
+ */
|
|
+enum ssh_packet_base_priority {
|
|
+ SSH_PACKET_PRIORITY_FLUSH = 0, /* same as DATA to sequence flush */
|
|
+ SSH_PACKET_PRIORITY_DATA = 0,
|
|
+ SSH_PACKET_PRIORITY_NAK = 1,
|
|
+ SSH_PACKET_PRIORITY_ACK = 2,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
|
|
+ */
|
|
+#define __SSH_PACKET_PRIORITY(base, try) \
|
|
+ (((base) << 4) | ((try) & 0x0f))
|
|
+
|
|
+/**
|
|
+ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
|
|
+ * number of tries.
|
|
+ * @base: The base priority as suffix of &enum ssh_packet_base_priority, e.g.
|
|
+ * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
|
|
+ * @try: The number of tries (must be less than 16).
|
|
+ *
|
|
+ * Compute the combined packet priority. The combined priority is dominated by
|
|
+ * the base priority, whereas the number of (re-)tries decides the precedence
|
|
+ * of packets with the same base priority, giving higher priority to packets
|
|
+ * that already have more tries.
|
|
+ *
|
|
+ * Return: Returns the computed priority as value fitting inside a &u8. A
|
|
+ * higher number means a higher priority.
|
|
+ */
|
|
+#define SSH_PACKET_PRIORITY(base, try) \
|
|
+ __SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
|
|
+
|
|
+/**
|
|
+ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
|
|
+ * @priority: The packet priority.
|
|
+ *
|
|
+ * Return: Returns the number of tries encoded in the specified packet
|
|
+ * priority.
|
|
+ */
|
|
+static inline u8 ssh_packet_priority_get_try(u8 priority)
|
|
+{
|
|
+ return priority & 0x0f;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_packet_priority_get_base() - Get base priority from packet priority.
|
|
+ * @priority: The packet priority.
|
|
+ *
|
|
+ * Return: Returns the base priority encoded in the given packet priority.
|
|
+ */
|
|
+static inline u8 ssh_packet_priority_get_base(u8 priority)
|
|
+{
|
|
+ return (priority & 0xf0) >> 4;
|
|
+}
|
|
+
|
|
+enum ssh_packet_flags {
|
|
+ /* state flags */
|
|
+ SSH_PACKET_SF_LOCKED_BIT,
|
|
+ SSH_PACKET_SF_QUEUED_BIT,
|
|
+ SSH_PACKET_SF_PENDING_BIT,
|
|
+ SSH_PACKET_SF_TRANSMITTING_BIT,
|
|
+ SSH_PACKET_SF_TRANSMITTED_BIT,
|
|
+ SSH_PACKET_SF_ACKED_BIT,
|
|
+ SSH_PACKET_SF_CANCELED_BIT,
|
|
+ SSH_PACKET_SF_COMPLETED_BIT,
|
|
+
|
|
+ /* type flags */
|
|
+ SSH_PACKET_TY_FLUSH_BIT,
|
|
+ SSH_PACKET_TY_SEQUENCED_BIT,
|
|
+ SSH_PACKET_TY_BLOCKING_BIT,
|
|
+
|
|
+ /* mask for state flags */
|
|
+ SSH_PACKET_FLAGS_SF_MASK =
|
|
+ BIT(SSH_PACKET_SF_LOCKED_BIT)
|
|
+ | BIT(SSH_PACKET_SF_QUEUED_BIT)
|
|
+ | BIT(SSH_PACKET_SF_PENDING_BIT)
|
|
+ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
|
|
+ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
|
|
+ | BIT(SSH_PACKET_SF_ACKED_BIT)
|
|
+ | BIT(SSH_PACKET_SF_CANCELED_BIT)
|
|
+ | BIT(SSH_PACKET_SF_COMPLETED_BIT),
|
|
+
|
|
+ /* mask for type flags */
|
|
+ SSH_PACKET_FLAGS_TY_MASK =
|
|
+ BIT(SSH_PACKET_TY_FLUSH_BIT)
|
|
+ | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
|
|
+ | BIT(SSH_PACKET_TY_BLOCKING_BIT),
|
|
+};
|
|
+
|
|
+struct ssh_ptl;
|
|
+struct ssh_packet;
|
|
+
|
|
+/**
|
|
+ * struct ssh_packet_ops - Callback operations for a SSH packet.
|
|
+ * @release: Function called when the packet reference count reaches zero.
|
|
+ * This callback must be relied upon to ensure that the packet has
|
|
+ * left the transport system(s).
|
|
+ * @complete: Function called when the packet is completed, either with
|
|
+ * success or failure. In case of failure, the reason for the
|
|
+ * failure is indicated by the value of the provided status code
|
|
+ * argument. This value will be zero in case of success. Note that
|
|
+ * a call to this callback does not guarantee that the packet is
|
|
+ * not in use by the transport system any more.
|
|
+ */
|
|
+struct ssh_packet_ops {
|
|
+ void (*release)(struct ssh_packet *p);
|
|
+ void (*complete)(struct ssh_packet *p, int status);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_packet - SSH transport packet.
|
|
+ * @ptl: Pointer to the packet transport layer. May be %NULL if the packet
|
|
+ * (or enclosing request) has not been submitted yet.
|
|
+ * @refcnt: Reference count of the packet.
|
|
+ * @priority: Priority of the packet. Must be computed via
|
|
+ * SSH_PACKET_PRIORITY(). Must only be accessed while holding the
|
|
+ * queue lock after first submission.
|
|
+ * @data: Raw message data.
|
|
+ * @data.len: Length of the raw message data.
|
|
+ * @data.ptr: Pointer to the raw message data buffer.
|
|
+ * @state: State and type flags describing current packet state (dynamic)
|
|
+ * and type (static). See &enum ssh_packet_flags for possible
|
|
+ * options.
|
|
+ * @timestamp: Timestamp specifying when the latest transmission of a
|
|
+ * currently pending packet has been started. May be %KTIME_MAX
|
|
+ * before or in-between transmission attempts. Used for the packet
|
|
+ * timeout implementation. Must only be accessed while holding the
|
|
+ * pending lock after first submission.
|
|
+ * @queue_node: The list node for the packet queue.
|
|
+ * @pending_node: The list node for the set of pending packets.
|
|
+ * @ops: Packet operations.
|
|
+ */
|
|
+struct ssh_packet {
|
|
+ struct ssh_ptl *ptl;
|
|
+ struct kref refcnt;
|
|
+
|
|
+ u8 priority;
|
|
+
|
|
+ struct {
|
|
+ size_t len;
|
|
+ u8 *ptr;
|
|
+ } data;
|
|
+
|
|
+ unsigned long state;
|
|
+ ktime_t timestamp;
|
|
+
|
|
+ struct list_head queue_node;
|
|
+ struct list_head pending_node;
|
|
+
|
|
+ const struct ssh_packet_ops *ops;
|
|
+};
|
|
+
|
|
+struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
|
|
+void ssh_packet_put(struct ssh_packet *p);
|
|
+
|
|
+/**
|
|
+ * ssh_packet_set_data() - Set raw message data of packet.
|
|
+ * @p: The packet for which the message data should be set.
|
|
+ * @ptr: Pointer to the memory holding the message data.
|
|
+ * @len: Length of the message data.
|
|
+ *
|
|
+ * Sets the raw message data buffer of the packet to the provided memory. The
|
|
+ * memory is not copied. Instead, the caller is responsible for management
|
|
+ * (i.e. allocation and deallocation) of the memory. The caller must ensure
|
|
+ * that the provided memory is valid and contains a valid SSH message,
|
|
+ * starting from the time of submission of the packet until the ``release``
|
|
+ * callback has been called. During this time, the memory may not be altered
|
|
+ * in any way.
|
|
+ */
|
|
+static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
|
|
+{
|
|
+ p->data.ptr = ptr;
|
|
+ p->data.len = len;
|
|
+}
|
|
+
|
|
+
|
|
+/* -- Request transport layer (rtl). ---------------------------------------- */
|
|
+
|
|
+enum ssh_request_flags {
|
|
+ /* state flags */
|
|
+ SSH_REQUEST_SF_LOCKED_BIT,
|
|
+ SSH_REQUEST_SF_QUEUED_BIT,
|
|
+ SSH_REQUEST_SF_PENDING_BIT,
|
|
+ SSH_REQUEST_SF_TRANSMITTING_BIT,
|
|
+ SSH_REQUEST_SF_TRANSMITTED_BIT,
|
|
+ SSH_REQUEST_SF_RSPRCVD_BIT,
|
|
+ SSH_REQUEST_SF_CANCELED_BIT,
|
|
+ SSH_REQUEST_SF_COMPLETED_BIT,
|
|
+
|
|
+ /* type flags */
|
|
+ SSH_REQUEST_TY_FLUSH_BIT,
|
|
+ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
|
|
+
|
|
+ /* mask for state flags */
|
|
+ SSH_REQUEST_FLAGS_SF_MASK =
|
|
+ BIT(SSH_REQUEST_SF_LOCKED_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_PENDING_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
|
|
+ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
|
|
+
|
|
+ /* mask for type flags */
|
|
+ SSH_REQUEST_FLAGS_TY_MASK =
|
|
+ BIT(SSH_REQUEST_TY_FLUSH_BIT)
|
|
+ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
|
|
+};
|
|
+
|
|
+struct ssh_rtl;
|
|
+struct ssh_request;
|
|
+
|
|
+/**
|
|
+ * struct ssh_request_ops - Callback operations for a SSH request.
|
|
+ * @release: Function called when the request's reference count reaches zero.
|
|
+ * This callback must be relied upon to ensure that the request has
|
|
+ * left the transport systems (both, packet and request systems).
|
|
+ * @complete: Function called when the request is completed, either with
|
|
+ * success or failure. The command data for the request response
|
|
+ * is provided via the &struct ssh_command parameter (``cmd``),
|
|
+ * the command payload of the request response via the &struct
|
|
+ * ssam_span parameter (``data``).
|
|
+ *
|
|
+ * If the request does not have any response or has not been
|
|
+ * completed with success, both ``cmd`` and ``data`` parameters will
|
|
+ * be NULL. If the request response does not have any command
|
|
+ * payload, the ``data`` span will be an empty (zero-length) span.
|
|
+ *
|
|
+ * In case of failure, the reason for the failure is indicated by
|
|
+ * the value of the provided status code argument (``status``). This
|
|
+ * value will be zero in case of success and a regular errno
|
|
+ * otherwise.
|
|
+ *
|
|
+ * Note that a call to this callback does not guarantee that the
|
|
+ * request is not in use by the transport systems any more.
|
|
+ */
|
|
+struct ssh_request_ops {
|
|
+ void (*release)(struct ssh_request *rqst);
|
|
+ void (*complete)(struct ssh_request *rqst,
|
|
+ const struct ssh_command *cmd,
|
|
+ const struct ssam_span *data, int status);
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssh_request - SSH transport request.
|
|
+ * @packet: The underlying SSH transport packet.
|
|
+ * @node: List node for the request queue and pending set.
|
|
+ * @state: State and type flags describing current request state (dynamic)
|
|
+ * and type (static). See &enum ssh_request_flags for possible
|
|
+ * options.
|
|
+ * @timestamp: Timestamp specifying when we start waiting on the response of
|
|
+ * the request. This is set once the underlying packet has been
|
|
+ * completed and may be %KTIME_MAX before that, or when the request
|
|
+ * does not expect a response. Used for the request timeout
|
|
+ * implementation.
|
|
+ * @ops: Request Operations.
|
|
+ */
|
|
+struct ssh_request {
|
|
+ struct ssh_packet packet;
|
|
+ struct list_head node;
|
|
+
|
|
+ unsigned long state;
|
|
+ ktime_t timestamp;
|
|
+
|
|
+ const struct ssh_request_ops *ops;
|
|
+};
|
|
+
|
|
+/**
|
|
+ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
|
|
+ * @p: The packet to cast.
|
|
+ *
|
|
+ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
|
|
+ * The caller is responsible for making sure that the packet is actually
|
|
+ * wrapped in a &struct ssh_request.
|
|
+ *
|
|
+ * Return: Returns the &struct ssh_request wrapping the provided packet.
|
|
+ */
|
|
+static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
|
|
+{
|
|
+ return container_of(p, struct ssh_request, packet);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_request_get() - Increment reference count of request.
|
|
+ * @r: The request to increment the reference count of.
|
|
+ *
|
|
+ * Increments the reference count of the given request by incrementing the
|
|
+ * reference count of the underlying &struct ssh_packet, enclosed in it.
|
|
+ *
|
|
+ * See also ssh_request_put(), ssh_packet_get().
|
|
+ *
|
|
+ * Return: Returns the request provided as input.
|
|
+ */
|
|
+static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
|
|
+{
|
|
+ return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_request_put() - Decrement reference count of request.
|
|
+ * @r: The request to decrement the reference count of.
|
|
+ *
|
|
+ * Decrements the reference count of the given request by decrementing the
|
|
+ * reference count of the underlying &struct ssh_packet, enclosed in it. If
|
|
+ * the reference count reaches zero, the ``release`` callback specified in the
|
|
+ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
|
|
+ * called.
|
|
+ *
|
|
+ * See also ssh_request_get(), ssh_packet_put().
|
|
+ */
|
|
+static inline void ssh_request_put(struct ssh_request *r)
|
|
+{
|
|
+ if (r)
|
|
+ ssh_packet_put(&r->packet);
|
|
+}
|
|
+
|
|
+/**
|
|
+ * ssh_request_set_data() - Set raw message data of request.
|
|
+ * @r: The request for which the message data should be set.
|
|
+ * @ptr: Pointer to the memory holding the message data.
|
|
+ * @len: Length of the message data.
|
|
+ *
|
|
+ * Sets the raw message data buffer of the underlying packet to the specified
|
|
+ * buffer. Does not copy the actual message data, just sets the buffer pointer
|
|
+ * and length. Refer to ssh_packet_set_data() for more details.
|
|
+ */
|
|
+static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
|
|
+{
|
|
+ ssh_packet_set_data(&r->packet, ptr, len);
|
|
+}
|
|
+
|
|
+#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
|
|
diff --git a/include/uapi/linux/surface_aggregator/cdev.h b/include/uapi/linux/surface_aggregator/cdev.h
|
|
new file mode 100644
|
|
index 000000000000..08f46b60b151
|
|
--- /dev/null
|
|
+++ b/include/uapi/linux/surface_aggregator/cdev.h
|
|
@@ -0,0 +1,147 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
|
|
+/*
|
|
+ * Surface System Aggregator Module (SSAM) user-space EC interface.
|
|
+ *
|
|
+ * Definitions, structs, and IOCTLs for the /dev/surface/aggregator misc
|
|
+ * device. This device provides direct user-space access to the SSAM EC.
|
|
+ * Intended for debugging and development.
|
|
+ *
|
|
+ * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
|
|
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H
|
|
+
|
|
+#include <linux/ioctl.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+/**
|
|
+ * enum ssam_cdev_request_flags - Request flags for SSAM cdev request IOCTL.
|
|
+ *
|
|
+ * @SSAM_CDEV_REQUEST_HAS_RESPONSE:
|
|
+ * Specifies that the request expects a response. If not set, the request
|
|
+ * will be directly completed after its underlying packet has been
|
|
+ * transmitted. If set, the request transport system waits for a response
|
|
+ * of the request.
|
|
+ *
|
|
+ * @SSAM_CDEV_REQUEST_UNSEQUENCED:
|
|
+ * Specifies that the request should be transmitted via an unsequenced
|
|
+ * packet. If set, the request must not have a response, meaning that this
|
|
+ * flag and the %SSAM_CDEV_REQUEST_HAS_RESPONSE flag are mutually
|
|
+ * exclusive.
|
|
+ */
|
|
+enum ssam_cdev_request_flags {
|
|
+ SSAM_CDEV_REQUEST_HAS_RESPONSE = 0x01,
|
|
+ SSAM_CDEV_REQUEST_UNSEQUENCED = 0x02,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct ssam_cdev_request - Controller request IOCTL argument.
|
|
+ * @target_category: Target category of the SAM request.
|
|
+ * @target_id: Target ID of the SAM request.
|
|
+ * @command_id: Command ID of the SAM request.
|
|
+ * @instance_id: Instance ID of the SAM request.
|
|
+ * @flags: Request flags (see &enum ssam_cdev_request_flags).
|
|
+ * @status: Request status (output).
|
|
+ * @payload: Request payload (input data).
|
|
+ * @payload.data: Pointer to request payload data.
|
|
+ * @payload.length: Length of request payload data (in bytes).
|
|
+ * @response: Request response (output data).
|
|
+ * @response.data: Pointer to response buffer.
|
|
+ * @response.length: On input: Capacity of response buffer (in bytes).
|
|
+ * On output: Length of request response (number of bytes
|
|
+ * in the buffer that are actually used).
|
|
+ */
|
|
+struct ssam_cdev_request {
|
|
+ __u8 target_category;
|
|
+ __u8 target_id;
|
|
+ __u8 command_id;
|
|
+ __u8 instance_id;
|
|
+ __u16 flags;
|
|
+ __s16 status;
|
|
+
|
|
+ struct {
|
|
+ __u64 data;
|
|
+ __u16 length;
|
|
+ __u8 __pad[6];
|
|
+ } payload;
|
|
+
|
|
+ struct {
|
|
+ __u64 data;
|
|
+ __u16 length;
|
|
+ __u8 __pad[6];
|
|
+ } response;
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+/**
|
|
+ * struct ssam_cdev_notifier_desc - Notifier descriptor.
|
|
+ * @priority: Priority value determining the order in which notifier
|
|
+ * callbacks will be called. A higher value means higher
|
|
+ * priority, i.e. the associated callback will be executed
|
|
+ * earlier than other (lower priority) callbacks.
|
|
+ * @target_category: The event target category for which this notifier should
|
|
+ * receive events.
|
|
+ *
|
|
+ * Specifies the notifier that should be registered or unregistered,
|
|
+ * specifically with which priority and for which target category of events.
|
|
+ */
|
|
+struct ssam_cdev_notifier_desc {
|
|
+ __s32 priority;
|
|
+ __u8 target_category;
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+/**
|
|
+ * struct ssam_cdev_event_desc - Event descriptor.
|
|
+ * @reg: Registry via which the event will be enabled/disabled.
|
|
+ * @reg.target_category: Target category for the event registry requests.
|
|
+ * @reg.target_id: Target ID for the event registry requests.
|
|
+ * @reg.cid_enable: Command ID for the event-enable request.
|
|
+ * @reg.cid_disable: Command ID for the event-disable request.
|
|
+ * @id: ID specifying the event.
|
|
+ * @id.target_category: Target category of the event source.
|
|
+ * @id.instance: Instance ID of the event source.
|
|
+ * @flags: Flags used for enabling the event.
|
|
+ *
|
|
+ * Specifies which event should be enabled/disabled and how to do that.
|
|
+ */
|
|
+struct ssam_cdev_event_desc {
|
|
+ struct {
|
|
+ __u8 target_category;
|
|
+ __u8 target_id;
|
|
+ __u8 cid_enable;
|
|
+ __u8 cid_disable;
|
|
+ } reg;
|
|
+
|
|
+ struct {
|
|
+ __u8 target_category;
|
|
+ __u8 instance;
|
|
+ } id;
|
|
+
|
|
+ __u8 flags;
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+/**
|
|
+ * struct ssam_cdev_event - SSAM event sent by the EC.
|
|
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
|
|
+ * @target_id: Target ID of the event source.
|
|
+ * @command_id: Command ID of the event.
|
|
+ * @instance_id: Instance ID of the event source.
|
|
+ * @length: Length of the event payload in bytes.
|
|
+ * @data: Event payload data.
|
|
+ */
|
|
+struct ssam_cdev_event {
|
|
+ __u8 target_category;
|
|
+ __u8 target_id;
|
|
+ __u8 command_id;
|
|
+ __u8 instance_id;
|
|
+ __u16 length;
|
|
+ __u8 data[];
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+#define SSAM_CDEV_REQUEST _IOWR(0xA5, 1, struct ssam_cdev_request)
|
|
+#define SSAM_CDEV_NOTIF_REGISTER _IOW(0xA5, 2, struct ssam_cdev_notifier_desc)
|
|
+#define SSAM_CDEV_NOTIF_UNREGISTER _IOW(0xA5, 3, struct ssam_cdev_notifier_desc)
|
|
+#define SSAM_CDEV_EVENT_ENABLE _IOW(0xA5, 4, struct ssam_cdev_event_desc)
|
|
+#define SSAM_CDEV_EVENT_DISABLE _IOW(0xA5, 5, struct ssam_cdev_event_desc)
|
|
+
|
|
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_CDEV_H */
|
|
diff --git a/include/uapi/linux/surface_aggregator/dtx.h b/include/uapi/linux/surface_aggregator/dtx.h
|
|
new file mode 100644
|
|
index 000000000000..fc0ba6cbe3e8
|
|
--- /dev/null
|
|
+++ b/include/uapi/linux/surface_aggregator/dtx.h
|
|
@@ -0,0 +1,146 @@
|
|
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
|
|
+/*
|
|
+ * Surface DTX (clipboard detachment system driver) user-space interface.
|
|
+ *
|
|
+ * Definitions, structs, and IOCTLs for the /dev/surface/dtx misc device. This
|
|
+ * device allows user-space to control the clipboard detachment process on
|
|
+ * Surface Book series devices.
|
|
+ *
|
|
+ * Copyright (C) 2020 Maximilian Luz <luzmaximilian@gmail.com>
|
|
+ */
|
|
+
|
|
+#ifndef _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
|
|
+#define _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H
|
|
+
|
|
+#include <linux/ioctl.h>
|
|
+#include <linux/types.h>
|
|
+
|
|
+/* Status/error categories */
|
|
+#define SDTX_CATEGORY_STATUS 0x0000
|
|
+#define SDTX_CATEGORY_RUNTIME_ERROR 0x1000
|
|
+#define SDTX_CATEGORY_HARDWARE_ERROR 0x2000
|
|
+#define SDTX_CATEGORY_UNKNOWN 0xf000
|
|
+
|
|
+#define SDTX_CATEGORY_MASK 0xf000
|
|
+#define SDTX_CATEGORY(value) ((value) & SDTX_CATEGORY_MASK)
|
|
+
|
|
+#define SDTX_STATUS(code) ((code) | SDTX_CATEGORY_STATUS)
|
|
+#define SDTX_ERR_RT(code) ((code) | SDTX_CATEGORY_RUNTIME_ERROR)
|
|
+#define SDTX_ERR_HW(code) ((code) | SDTX_CATEGORY_HARDWARE_ERROR)
|
|
+#define SDTX_UNKNOWN(code) ((code) | SDTX_CATEGORY_UNKNOWN)
|
|
+
|
|
+#define SDTX_SUCCESS(value) (SDTX_CATEGORY(value) == SDTX_CATEGORY_STATUS)
|
|
+
|
|
+/* Latch status values */
|
|
+#define SDTX_LATCH_CLOSED SDTX_STATUS(0x00)
|
|
+#define SDTX_LATCH_OPENED SDTX_STATUS(0x01)
|
|
+
|
|
+/* Base state values */
|
|
+#define SDTX_BASE_DETACHED SDTX_STATUS(0x00)
|
|
+#define SDTX_BASE_ATTACHED SDTX_STATUS(0x01)
|
|
+
|
|
+/* Runtime errors (non-critical) */
|
|
+#define SDTX_DETACH_NOT_FEASIBLE SDTX_ERR_RT(0x01)
|
|
+#define SDTX_DETACH_TIMEDOUT SDTX_ERR_RT(0x02)
|
|
+
|
|
+/* Hardware errors (critical) */
|
|
+#define SDTX_ERR_FAILED_TO_OPEN SDTX_ERR_HW(0x01)
|
|
+#define SDTX_ERR_FAILED_TO_REMAIN_OPEN SDTX_ERR_HW(0x02)
|
|
+#define SDTX_ERR_FAILED_TO_CLOSE SDTX_ERR_HW(0x03)
|
|
+
|
|
+/* Base types */
|
|
+#define SDTX_DEVICE_TYPE_HID 0x0100
|
|
+#define SDTX_DEVICE_TYPE_SSH 0x0200
|
|
+
|
|
+#define SDTX_DEVICE_TYPE_MASK 0x0f00
|
|
+#define SDTX_DEVICE_TYPE(value) ((value) & SDTX_DEVICE_TYPE_MASK)
|
|
+
|
|
+#define SDTX_BASE_TYPE_HID(id) ((id) | SDTX_DEVICE_TYPE_HID)
|
|
+#define SDTX_BASE_TYPE_SSH(id) ((id) | SDTX_DEVICE_TYPE_SSH)
|
|
+
|
|
+/**
|
|
+ * enum sdtx_device_mode - Mode describing how (and if) the clipboard is
|
|
+ * attached to the base of the device.
|
|
+ * @SDTX_DEVICE_MODE_TABLET: The clipboard is detached from the base and the
|
|
+ * device operates as tablet.
|
|
+ * @SDTX_DEVICE_MODE_LAPTOP: The clipboard is attached normally to the base
|
|
+ * and the device operates as laptop.
|
|
+ * @SDTX_DEVICE_MODE_STUDIO: The clipboard is attached to the base in reverse.
|
|
+ * The device operates as tablet with keyboard and
|
|
+ * touchpad deactivated, however, the base battery
|
|
+ * and, if present in the specific device model, dGPU
|
|
+ * are available to the system.
|
|
+ */
|
|
+enum sdtx_device_mode {
|
|
+ SDTX_DEVICE_MODE_TABLET = 0x00,
|
|
+ SDTX_DEVICE_MODE_LAPTOP = 0x01,
|
|
+ SDTX_DEVICE_MODE_STUDIO = 0x02,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct sdtx_event - Event provided by reading from the DTX device file.
|
|
+ * @length: Length of the event payload, in bytes.
|
|
+ * @code: Event code, detailing what type of event this is.
|
|
+ * @data: Payload of the event, containing @length bytes.
|
|
+ *
|
|
+ * See &enum sdtx_event_code for currently valid event codes.
|
|
+ */
|
|
+struct sdtx_event {
|
|
+ __u16 length;
|
|
+ __u16 code;
|
|
+ __u8 data[];
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+/**
|
|
+ * enum sdtx_event_code - Code describing the type of an event.
|
|
+ * @SDTX_EVENT_REQUEST: Detachment request event type.
|
|
+ * @SDTX_EVENT_CANCEL: Cancel detachment process event type.
|
|
+ * @SDTX_EVENT_BASE_CONNECTION: Base/clipboard connection change event type.
|
|
+ * @SDTX_EVENT_LATCH_STATUS: Latch status change event type.
|
|
+ * @SDTX_EVENT_DEVICE_MODE: Device mode change event type.
|
|
+ *
|
|
+ * Used in @struct sdtx_event to describe the type of the event. Further event
|
|
+ * codes are reserved for future use. Any event parser should be able to
|
|
+ * gracefully handle unknown events, i.e. by simply skipping them.
|
|
+ *
|
|
+ * Consult the DTX user-space interface documentation for details regarding
|
|
+ * the individual event types.
|
|
+ */
|
|
+enum sdtx_event_code {
|
|
+ SDTX_EVENT_REQUEST = 1,
|
|
+ SDTX_EVENT_CANCEL = 2,
|
|
+ SDTX_EVENT_BASE_CONNECTION = 3,
|
|
+ SDTX_EVENT_LATCH_STATUS = 4,
|
|
+ SDTX_EVENT_DEVICE_MODE = 5,
|
|
+};
|
|
+
|
|
+/**
|
|
+ * struct sdtx_base_info - Describes if and what type of base is connected.
|
|
+ * @state: The state of the connection. Valid values are %SDTX_BASE_DETACHED,
|
|
+ * %SDTX_BASE_ATTACHED, and %SDTX_DETACH_NOT_FEASIBLE (in case a base
|
|
+ * is attached but low clipboard battery prevents detachment). Other
|
|
+ * values are currently reserved.
|
|
+ * @base_id: The type of base connected. Zero if no base is connected.
|
|
+ */
|
|
+struct sdtx_base_info {
|
|
+ __u16 state;
|
|
+ __u16 base_id;
|
|
+} __attribute__((__packed__));
|
|
+
|
|
+/* IOCTLs */
|
|
+#define SDTX_IOCTL_EVENTS_ENABLE _IO(0xa5, 0x21)
|
|
+#define SDTX_IOCTL_EVENTS_DISABLE _IO(0xa5, 0x22)
|
|
+
|
|
+#define SDTX_IOCTL_LATCH_LOCK _IO(0xa5, 0x23)
|
|
+#define SDTX_IOCTL_LATCH_UNLOCK _IO(0xa5, 0x24)
|
|
+
|
|
+#define SDTX_IOCTL_LATCH_REQUEST _IO(0xa5, 0x25)
|
|
+#define SDTX_IOCTL_LATCH_CONFIRM _IO(0xa5, 0x26)
|
|
+#define SDTX_IOCTL_LATCH_HEARTBEAT _IO(0xa5, 0x27)
|
|
+#define SDTX_IOCTL_LATCH_CANCEL _IO(0xa5, 0x28)
|
|
+
|
|
+#define SDTX_IOCTL_GET_BASE_INFO _IOR(0xa5, 0x29, struct sdtx_base_info)
|
|
+#define SDTX_IOCTL_GET_DEVICE_MODE _IOR(0xa5, 0x2a, __u16)
|
|
+#define SDTX_IOCTL_GET_LATCH_STATUS _IOR(0xa5, 0x2b, __u16)
|
|
+
|
|
+#endif /* _UAPI_LINUX_SURFACE_AGGREGATOR_DTX_H */
|
|
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
|
|
index 13acbf55c6fd..6a319852083e 100644
|
|
--- a/scripts/mod/devicetable-offsets.c
|
|
+++ b/scripts/mod/devicetable-offsets.c
|
|
@@ -227,8 +227,9 @@ int main(void)
|
|
|
|
DEVID(ssam_device_id);
|
|
DEVID_FIELD(ssam_device_id, match_flags);
|
|
+ DEVID_FIELD(ssam_device_id, domain);
|
|
DEVID_FIELD(ssam_device_id, category);
|
|
- DEVID_FIELD(ssam_device_id, channel);
|
|
+ DEVID_FIELD(ssam_device_id, target);
|
|
DEVID_FIELD(ssam_device_id, instance);
|
|
DEVID_FIELD(ssam_device_id, function);
|
|
|
|
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
|
|
index 76e3b1d7db45..f171616ab318 100644
|
|
--- a/scripts/mod/file2alias.c
|
|
+++ b/scripts/mod/file2alias.c
|
|
@@ -1276,20 +1276,22 @@ static int do_typec_entry(const char *filename, void *symval, char *alias)
|
|
return 1;
|
|
}
|
|
|
|
-/* Looks like: ssam:cNtNiNfN
|
|
+/*
|
|
+ * Looks like: ssam:dNcNtNiNfN
|
|
*
|
|
* N is exactly 2 digits, where each is an upper-case hex digit.
|
|
*/
|
|
static int do_ssam_entry(const char *filename, void *symval, char *alias)
|
|
{
|
|
DEF_FIELD(symval, ssam_device_id, match_flags);
|
|
+ DEF_FIELD(symval, ssam_device_id, domain);
|
|
DEF_FIELD(symval, ssam_device_id, category);
|
|
- DEF_FIELD(symval, ssam_device_id, channel);
|
|
+ DEF_FIELD(symval, ssam_device_id, target);
|
|
DEF_FIELD(symval, ssam_device_id, instance);
|
|
DEF_FIELD(symval, ssam_device_id, function);
|
|
|
|
- sprintf(alias, "ssam:c%02X", category);
|
|
- ADD(alias, "t", match_flags & SSAM_MATCH_CHANNEL, channel);
|
|
+ sprintf(alias, "ssam:d%02Xc%02X", domain, category);
|
|
+ ADD(alias, "t", match_flags & SSAM_MATCH_TARGET, target);
|
|
ADD(alias, "i", match_flags & SSAM_MATCH_INSTANCE, instance);
|
|
ADD(alias, "f", match_flags & SSAM_MATCH_FUNCTION, function);
|
|
|
|
--
|
|
2.33.0
|
|
|