diff --git a/patches/4.19/0001-surface3-power.patch b/patches/4.19/0001-surface3-power.patch index 20048bd1f..8695570c2 100644 --- a/patches/4.19/0001-surface3-power.patch +++ b/patches/4.19/0001-surface3-power.patch @@ -1,7 +1,7 @@ -From a2b33dd39478cc3c162c3e76a2a0bd888be6b019 Mon Sep 17 00:00:00 2001 +From 0cdf7b25f3375ca695c9622052aea85b5d62aa26 Mon Sep 17 00:00:00 2001 From: Maximilian Luz Date: Sat, 28 Sep 2019 18:00:43 +0200 -Subject: [PATCH 01/10] surface3-power +Subject: [PATCH 1/8] surface3-power --- drivers/platform/x86/Kconfig | 7 + @@ -11,7 +11,7 @@ Subject: [PATCH 01/10] surface3-power create mode 100644 drivers/platform/x86/surface3_power.c diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig -index 1e2524de6a63..2ad19dc64a4a 100644 +index 1e2524de6a63c..2ad19dc64a4af 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1160,6 +1160,13 @@ config SURFACE_3_BUTTON @@ -29,7 +29,7 @@ index 1e2524de6a63..2ad19dc64a4a 100644 tristate "Intel P-Unit IPC Driver" ---help--- diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile -index dc29af4d8e2f..2ea90039a3e4 100644 +index dc29af4d8e2fa..2ea90039a3e49 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_INTEL_PMC_IPC) += intel_pmc_ipc.o @@ -42,7 +42,7 @@ index dc29af4d8e2f..2ea90039a3e4 100644 obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \ diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c new file mode 100644 -index 000000000000..e0af01a60302 +index 0000000000000..e0af01a603025 --- /dev/null +++ b/drivers/platform/x86/surface3_power.c @@ -0,0 +1,604 @@ @@ -651,5 +651,5 @@ index 000000000000..e0af01a60302 +MODULE_DESCRIPTION("mshw0011 driver"); +MODULE_LICENSE("GPL v2"); -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0002-surface3-spi.patch b/patches/4.19/0002-surface3-spi.patch index e8313795c..8e41f3a37 100644 --- 
a/patches/4.19/0002-surface3-spi.patch +++ b/patches/4.19/0002-surface3-spi.patch @@ -1,14 +1,14 @@ -From 06e4d51c83a77d54ca5b1d402fb57554c51d3926 Mon Sep 17 00:00:00 2001 +From 269aabb2d471ff68167a0d515a1fb7d96b6b0a1b Mon Sep 17 00:00:00 2001 From: kitakar5525 <34676735+kitakar5525@users.noreply.github.com> Date: Fri, 6 Dec 2019 23:10:30 +0900 -Subject: [PATCH 02/10] surface3-spi +Subject: [PATCH 2/8] surface3-spi --- drivers/input/touchscreen/surface3_spi.c | 26 ++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c -index 5db0f1c4ef38..8935ddbc2357 100644 +index 5db0f1c4ef384..8935ddbc23574 100644 --- a/drivers/input/touchscreen/surface3_spi.c +++ b/drivers/input/touchscreen/surface3_spi.c @@ -29,6 +29,12 @@ @@ -59,5 +59,5 @@ index 5db0f1c4ef38..8935ddbc2357 100644 } -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0003-surface3-oemb.patch b/patches/4.19/0003-surface3-oemb.patch index 3c72bbec4..bc2f065ac 100644 --- a/patches/4.19/0003-surface3-oemb.patch +++ b/patches/4.19/0003-surface3-oemb.patch @@ -1,7 +1,7 @@ -From c9fd73ef014ee91d0cda46c2c456ae579ba3b4e0 Mon Sep 17 00:00:00 2001 +From 39843798645bf98c1176b18a12d090706bf63337 Mon Sep 17 00:00:00 2001 From: Chih-Wei Huang Date: Tue, 18 Sep 2018 11:01:37 +0800 -Subject: [PATCH 03/10] surface3-oemb +Subject: [PATCH 3/8] surface3-oemb --- drivers/platform/x86/surface3-wmi.c | 7 +++++++ @@ -10,7 +10,7 @@ Subject: [PATCH 03/10] surface3-oemb 3 files changed, 24 insertions(+) diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c -index 25b176996cb7..58d11877677f 100644 +index 25b176996cb79..58d11877677f2 100644 --- a/drivers/platform/x86/surface3-wmi.c +++ b/drivers/platform/x86/surface3-wmi.c @@ -41,6 +41,13 @@ static const struct dmi_system_id surface3_dmi_table[] = { @@ -28,10 +28,10 @@ index 25b176996cb7..58d11877677f 100644 { } }; diff --git a/sound/soc/codecs/rt5645.c 
b/sound/soc/codecs/rt5645.c -index 7e3b47eeea04..85c0731dfd4b 100644 +index 9185bd7c5a6dc..a514d03ae58fc 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c -@@ -3706,6 +3706,15 @@ static const struct dmi_system_id dmi_platform_data[] = { +@@ -3712,6 +3712,15 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&intel_braswell_platform_data, }, @@ -48,7 +48,7 @@ index 7e3b47eeea04..85c0731dfd4b 100644 /* * Match for the GPDwin which unfortunately uses somewhat diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c -index 91bb99b69601..8418938b32ad 100644 +index 91bb99b69601d..8418938b32ad5 100644 --- a/sound/soc/intel/common/soc-acpi-intel-cht-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c @@ -36,6 +36,14 @@ static const struct dmi_system_id cht_table[] = { @@ -67,5 +67,5 @@ index 91bb99b69601..8418938b32ad 100644 }; -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0004-surface-buttons.patch b/patches/4.19/0004-surface-buttons.patch index 6ed0d3744..5123e8287 100644 --- a/patches/4.19/0004-surface-buttons.patch +++ b/patches/4.19/0004-surface-buttons.patch @@ -1,7 +1,7 @@ -From 25c1108f70fde7cca28e5f851427108737443088 Mon Sep 17 00:00:00 2001 +From 36033252cb6c138adf31c22e9f80abd852598c9c Mon Sep 17 00:00:00 2001 From: Maximilian Luz Date: Sat, 27 Jul 2019 17:51:37 +0200 -Subject: [PATCH 04/10] surface-buttons +Subject: [PATCH 4/8] surface-buttons --- drivers/input/misc/Kconfig | 6 +- @@ -10,7 +10,7 @@ Subject: [PATCH 04/10] surface-buttons 3 files changed, 151 insertions(+), 16 deletions(-) diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig -index ca59a2be9bc5..ea69610370e8 100644 +index ca59a2be9bc53..ea69610370e84 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -781,10 +781,10 @@ config INPUT_IDEAPAD_SLIDEBAR @@ -28,7 +28,7 @@ index ca59a2be9bc5..ea69610370e8 100644 To compile this driver as a 
module, choose M here: the module will be called soc_button_array. diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c -index 55cd6e0b409c..c564ea99f47d 100644 +index 55cd6e0b409c3..c564ea99f47db 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -29,6 +29,11 @@ struct soc_button_info { @@ -200,7 +200,7 @@ index 55cd6e0b409c..c564ea99f47d 100644 }; diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c -index 1b491690ce07..96627627060e 100644 +index 1b491690ce070..96627627060e9 100644 --- a/drivers/platform/x86/surfacepro3_button.c +++ b/drivers/platform/x86/surfacepro3_button.c @@ -24,6 +24,12 @@ @@ -272,5 +272,5 @@ index 1b491690ce07..96627627060e 100644 if (!button) return -ENOMEM; -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0005-surface-sam.patch b/patches/4.19/0005-surface-sam.patch index d1e9a8a8c..32de12e4f 100644 --- a/patches/4.19/0005-surface-sam.patch +++ b/patches/4.19/0005-surface-sam.patch @@ -1,29 +1,30 @@ -From 09f283be44830292c64b4af888e3ddc9fff46c11 Mon Sep 17 00:00:00 2001 +From dee763e9f3fe65a7448085c14f21dc961a1ca525 Mon Sep 17 00:00:00 2001 From: qzed Date: Mon, 26 Aug 2019 01:15:40 +0200 -Subject: [PATCH 05/10] surface-sam +Subject: [PATCH 5/8] surface-sam --- drivers/acpi/acpica/dsopcode.c | 2 +- drivers/acpi/acpica/exfield.c | 26 +- drivers/platform/x86/Kconfig | 2 + drivers/platform/x86/Makefile | 1 + - drivers/platform/x86/surface_sam/Kconfig | 164 ++ - drivers/platform/x86/surface_sam/Makefile | 10 + - .../x86/surface_sam/surface_sam_dtx.c | 604 ++++++ - .../x86/surface_sam/surface_sam_hps.c | 1110 +++++++++++ - .../x86/surface_sam/surface_sam_san.c | 883 +++++++++ + drivers/platform/x86/surface_sam/Kconfig | 164 + + drivers/platform/x86/surface_sam/Makefile | 15 + + .../x86/surface_sam/surface_sam_dtx.c | 590 ++ + .../x86/surface_sam/surface_sam_hps.c | 1281 +++++ + .../x86/surface_sam/surface_sam_san.c | 
913 +++ .../x86/surface_sam/surface_sam_san.h | 30 + - .../x86/surface_sam/surface_sam_sid.c | 137 ++ - .../x86/surface_sam/surface_sam_sid_gpelid.c | 224 +++ - .../surface_sam/surface_sam_sid_perfmode.c | 216 ++ - .../x86/surface_sam/surface_sam_sid_power.c | 1264 ++++++++++++ - .../x86/surface_sam/surface_sam_sid_vhf.c | 428 ++++ - .../x86/surface_sam/surface_sam_ssh.c | 1744 +++++++++++++++++ - .../x86/surface_sam/surface_sam_ssh.h | 98 + - .../x86/surface_sam/surface_sam_vhf.c | 270 +++ + .../x86/surface_sam/surface_sam_sid.c | 147 + + .../x86/surface_sam/surface_sam_sid_gpelid.c | 224 + + .../surface_sam/surface_sam_sid_perfmode.c | 216 + + .../x86/surface_sam/surface_sam_sid_power.c | 1146 ++++ + .../x86/surface_sam/surface_sam_sid_vhf.c | 420 ++ + .../x86/surface_sam/surface_sam_ssh.c | 5115 +++++++++++++++++ + .../x86/surface_sam/surface_sam_ssh.h | 482 ++ + .../x86/surface_sam/surface_sam_ssh_trace.h | 536 ++ + .../x86/surface_sam/surface_sam_vhf.c | 261 + drivers/tty/serdev/core.c | 110 +- - 19 files changed, 7294 insertions(+), 29 deletions(-) + 20 files changed, 11652 insertions(+), 29 deletions(-) create mode 100644 drivers/platform/x86/surface_sam/Kconfig create mode 100644 drivers/platform/x86/surface_sam/Makefile create mode 100644 drivers/platform/x86/surface_sam/surface_sam_dtx.c @@ -37,10 +38,11 @@ Subject: [PATCH 05/10] surface-sam create mode 100644 drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.c create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh.h + create mode 100644 drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h create mode 100644 drivers/platform/x86/surface_sam/surface_sam_vhf.c diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c -index 2f4641e5ecde..beb22d7e245e 100644 +index 2f4641e5ecde8..beb22d7e245e3 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -123,7 +123,7 @@ 
acpi_ds_init_buffer_field(u16 aml_opcode, @@ -53,7 +55,7 @@ index 2f4641e5ecde..beb22d7e245e 100644 bit_count = (u32) length_desc->integer.value; diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c -index b272c329d45d..cf547883a993 100644 +index b272c329d45db..cf547883a9937 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -102,6 +102,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, @@ -106,7 +108,7 @@ index b272c329d45d..cf547883a993 100644 } else { /* IPMI */ diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig -index 2ad19dc64a4a..7cee1015981d 100644 +index 2ad19dc64a4af..7cee1015981d5 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1250,6 +1250,8 @@ config INTEL_ATOMISP2_PM @@ -119,7 +121,7 @@ index 2ad19dc64a4a..7cee1015981d 100644 config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile -index 2ea90039a3e4..cbea9579c1d2 100644 +index 2ea90039a3e49..cbea9579c1d2f 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -94,3 +94,4 @@ obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o @@ -129,7 +131,7 @@ index 2ea90039a3e4..cbea9579c1d2 100644 +obj-$(CONFIG_SURFACE_SAM) += surface_sam/ diff --git a/drivers/platform/x86/surface_sam/Kconfig b/drivers/platform/x86/surface_sam/Kconfig new file mode 100644 -index 000000000000..c4556e58b9a5 +index 0000000000000..51278a80569aa --- /dev/null +++ b/drivers/platform/x86/surface_sam/Kconfig @@ -0,0 +1,164 @@ @@ -231,7 +233,7 @@ index 000000000000..c4556e58b9a5 + default m + help + Driver to properly handle hot-plugging and explicit power-on/power-off -+ of the discrete GPU (dGPU) on the Surface Book 2. ++ of the discrete GPU (dGPU) on the Surface Book 2 and 3. + + If you are not sure, say M here. + @@ -299,10 +301,15 @@ index 000000000000..c4556e58b9a5 + If you are not sure, say M here. 
diff --git a/drivers/platform/x86/surface_sam/Makefile b/drivers/platform/x86/surface_sam/Makefile new file mode 100644 -index 000000000000..188975ccde5c +index 0000000000000..1a5c1260639dc --- /dev/null +++ b/drivers/platform/x86/surface_sam/Makefile -@@ -0,0 +1,10 @@ +@@ -0,0 +1,15 @@ ++# SPDX-License-Identifier: GPL-2.0-or-later ++ ++# For include/trace/define_trace.h to include surface_sam_ssh_trace.h ++CFLAGS_surface_sam_ssh.o = -I$(src) ++ +obj-$(CONFIG_SURFACE_SAM_SSH) += surface_sam_ssh.o +obj-$(CONFIG_SURFACE_SAM_SAN) += surface_sam_san.o +obj-$(CONFIG_SURFACE_SAM_DTX) += surface_sam_dtx.o @@ -315,11 +322,11 @@ index 000000000000..188975ccde5c +obj-$(CONFIG_SURFACE_SAM_SID_VHF) += surface_sam_sid_vhf.o diff --git a/drivers/platform/x86/surface_sam/surface_sam_dtx.c b/drivers/platform/x86/surface_sam/surface_sam_dtx.c new file mode 100644 -index 000000000000..1e772fd5b0be +index 0000000000000..88dba7bced3a4 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_dtx.c -@@ -0,0 +1,604 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,590 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Detachment system (DTX) driver for Microsoft Surface Book 2. 
+ */ @@ -361,8 +368,6 @@ index 000000000000..1e772fd5b0be +#define SAM_RQST_DTX_CID_LATCH_OPEN 0x09 +#define SAM_RQST_DTX_CID_GET_OPMODE 0x0D + -+#define SAM_EVENT_DTX_TC 0x11 -+#define SAM_EVENT_DTX_RQID 0x0011 +#define SAM_EVENT_DTX_CID_CONNECTION 0x0c +#define SAM_EVENT_DTX_CID_BUTTON 0x0e +#define SAM_EVENT_DTX_CID_ERROR 0x0f @@ -393,6 +398,8 @@ index 000000000000..1e772fd5b0be +} __packed; + +struct surface_dtx_dev { ++ struct ssam_event_notifier notif; ++ struct delayed_work opmode_work; + wait_queue_head_t waitq; + struct miscdevice mdev; + spinlock_t client_lock; @@ -425,10 +432,10 @@ index 000000000000..1e772fd5b0be + struct surface_sam_ssh_rqst rqst = { + .tc = SAM_RQST_DTX_TC, + .cid = SAM_RQST_DTX_CID_GET_OPMODE, -+ .iid = 0, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, -+ .snc = 1, -+ .cdl = 0, ++ .iid = 0x00, ++ .chn = 0x01, ++ .snc = 0x01, ++ .cdl = 0x00, + .pld = NULL, + }; + @@ -454,10 +461,10 @@ index 000000000000..1e772fd5b0be + struct surface_sam_ssh_rqst rqst = { + .tc = SAM_RQST_DTX_TC, + .cid = cid, -+ .iid = 0, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, -+ .snc = 0, -+ .cdl = 0, ++ .iid = 0x00, ++ .chn = 0x01, ++ .snc = 0x00, ++ .cdl = 0x00, + .pld = NULL, + }; + @@ -719,70 +726,48 @@ index 000000000000..1e772fd5b0be + spin_unlock(&ddev->input_lock); +} + -+static int surface_dtx_evt_dtx(struct surface_sam_ssh_event *in_event, void *data) ++static void surface_dtx_opmode_workfn(struct work_struct *work) +{ -+ struct surface_dtx_dev *ddev = data; -+ struct surface_dtx_event event; ++ struct surface_dtx_dev *ddev = container_of(work, struct surface_dtx_dev, opmode_work.work); + -+ switch (in_event->cid) { ++ surface_dtx_update_opmpde(ddev); ++} ++ ++static u32 surface_dtx_notification(struct ssam_notifier_block *nb, const struct ssam_event *in_event) ++{ ++ struct surface_dtx_dev *ddev = container_of(nb, struct surface_dtx_dev, notif.base); ++ struct surface_dtx_event event; ++ unsigned long delay; ++ ++ switch (in_event->command_id) { + case 
SAM_EVENT_DTX_CID_CONNECTION: + case SAM_EVENT_DTX_CID_BUTTON: + case SAM_EVENT_DTX_CID_ERROR: + case SAM_EVENT_DTX_CID_LATCH_STATUS: -+ if (in_event->len > 2) { ++ if (in_event->length > 2) { + printk(DTX_ERR "unexpected payload size (cid: %x, len: %u)\n", -+ in_event->cid, in_event->len); -+ return 0; ++ in_event->command_id, in_event->length); ++ return SSAM_NOTIF_HANDLED; + } + -+ event.type = in_event->tc; -+ event.code = in_event->cid; -+ event.arg0 = in_event->len >= 1 ? in_event->pld[0] : 0x00; -+ event.arg1 = in_event->len >= 2 ? in_event->pld[1] : 0x00; ++ event.type = in_event->target_category; ++ event.code = in_event->command_id; ++ event.arg0 = in_event->length >= 1 ? in_event->data[0] : 0x00; ++ event.arg1 = in_event->length >= 2 ? in_event->data[1] : 0x00; + surface_dtx_push_event(ddev, &event); + break; + + default: -+ printk(DTX_WARN "unhandled dtx event (cid: %x)\n", in_event->cid); ++ return 0; + } + + // update device mode -+ if (in_event->cid == SAM_EVENT_DTX_CID_CONNECTION) { -+ if (in_event->pld[0]) { -+ // Note: we're already in a workqueue task -+ msleep(DTX_CONNECT_OPMODE_DELAY); -+ } -+ -+ surface_dtx_update_opmpde(ddev); ++ if (in_event->command_id == SAM_EVENT_DTX_CID_CONNECTION) { ++ delay = event.arg0 ? 
DTX_CONNECT_OPMODE_DELAY : 0; ++ schedule_delayed_work(&ddev->opmode_work, delay); + } + -+ return 0; -+} -+ -+static int surface_dtx_events_setup(struct surface_dtx_dev *ddev) -+{ -+ int status; -+ -+ status = surface_sam_ssh_set_event_handler(SAM_EVENT_DTX_RQID, surface_dtx_evt_dtx, ddev); -+ if (status) -+ goto err_handler; -+ -+ status = surface_sam_ssh_enable_event_source(SAM_EVENT_DTX_TC, 0x01, SAM_EVENT_DTX_RQID); -+ if (status) -+ goto err_source; -+ -+ return 0; -+ -+err_source: -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_DTX_RQID); -+err_handler: -+ return status; -+} -+ -+static void surface_dtx_events_disable(void) -+{ -+ surface_sam_ssh_disable_event_source(SAM_EVENT_DTX_TC, 0x01, SAM_EVENT_DTX_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_DTX_RQID); ++ return SSAM_NOTIF_HANDLED; +} + + @@ -844,6 +829,7 @@ index 000000000000..1e772fd5b0be + goto err_register; + } + ++ INIT_DELAYED_WORK(&ddev->opmode_work, surface_dtx_opmode_workfn); + INIT_LIST_HEAD(&ddev->client_list); + init_waitqueue_head(&ddev->waitq); + ddev->active = true; @@ -854,8 +840,15 @@ index 000000000000..1e772fd5b0be + if (status) + goto err_register; + -+ // enable events -+ status = surface_dtx_events_setup(ddev); ++ // set up events ++ ddev->notif.base.priority = 1; ++ ddev->notif.base.fn = surface_dtx_notification; ++ ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS; ++ ddev->notif.event.id.instance = 0; ++ ddev->notif.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = surface_sam_ssh_notifier_register(&ddev->notif); + if (status) + goto err_events_setup; + @@ -884,7 +877,7 @@ index 000000000000..1e772fd5b0be + mutex_unlock(&ddev->mutex); + + // After this call we're guaranteed that no more input events will arive -+ surface_dtx_events_disable(); ++ surface_sam_ssh_notifier_unregister(&ddev->notif); + + // wake up clients + spin_lock(&ddev->client_lock); @@ -914,7 +907,7 @@ index 000000000000..1e772fd5b0be + 
.remove = surface_sam_dtx_remove, + .driver = { + .name = "surface_sam_dtx", -+ .acpi_match_table = ACPI_PTR(surface_sam_dtx_match), ++ .acpi_match_table = surface_sam_dtx_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; @@ -922,14 +915,14 @@ index 000000000000..1e772fd5b0be + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Detachment System (DTX) Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_hps.c b/drivers/platform/x86/surface_sam/surface_sam_hps.c new file mode 100644 -index 000000000000..4fba5ee75a66 +index 0000000000000..f945c2ec6d0c8 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_hps.c -@@ -0,0 +1,1110 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,1281 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface dGPU hot-plug system driver. + * Supports explicit setting of the dGPU power-state on the Surface Book 2 and @@ -970,10 +963,11 @@ index 000000000000..4fba5ee75a66 +#define SAM_DTX_TC 0x11 +#define SAM_DTX_CID_LATCH_LOCK 0x06 +#define SAM_DTX_CID_LATCH_UNLOCK 0x07 ++#define ACPI_SGCP_NOTIFY_POWER_ON 0x81 + +#define SHPS_DSM_GPU_ADDRS_RP "RP5_PCIE" +#define SHPS_DSM_GPU_ADDRS_DGPU "DGPU_PCIE" -+ ++#define SHPS_PCI_GPU_ADDR_RP "\\_SB.PCI0.RP13._ADR" + +static const struct acpi_gpio_params gpio_base_presence_int = { 0, 0, false }; +static const struct acpi_gpio_params gpio_base_presence = { 1, 0, false }; @@ -1011,6 +1005,15 @@ index 000000000000..4fba5ee75a66 + return ""; +} + ++enum shps_notification_method { ++ SHPS_NOTIFICATION_METHOD_SAN = 1, ++ SHPS_NOTIFICATION_METHOD_SGCP = 2 ++}; ++ ++struct shps_hardware_traits { ++ enum shps_notification_method notification_method; ++ const char *dgpu_rp_pci_address; ++}; + +struct shps_driver_data { + struct mutex lock; @@ -1022,6 +1025,31 @@ index 000000000000..4fba5ee75a66 + unsigned int irq_dgpu_presence; + unsigned int irq_base_presence; + 
unsigned long state; ++ acpi_handle sgpc_handle; ++ struct shps_hardware_traits hardware_traits; ++}; ++ ++struct shps_hardware_probe { ++ const char *hardware_id; ++ int generation; ++ struct shps_hardware_traits *hardware_traits; ++}; ++ ++static struct shps_hardware_traits shps_gen1_hwtraits = { ++ .notification_method = SHPS_NOTIFICATION_METHOD_SAN ++}; ++ ++static struct shps_hardware_traits shps_gen2_hwtraits = { ++ .notification_method = SHPS_NOTIFICATION_METHOD_SGCP, ++ .dgpu_rp_pci_address = SHPS_PCI_GPU_ADDR_RP ++}; ++ ++static const struct shps_hardware_probe shps_hardware_probe_match[] = { ++ /* Surface Book 3 */ ++ { "MSHW0117", 2, &shps_gen2_hwtraits }, ++ ++ /* Surface Book 2 (default, must be last entry) */ ++ { NULL, 1, &shps_gen1_hwtraits } +}; + +#define SHPS_STATE_BIT_PWRTGT 0 /* desired power state: 1 for on, 0 for off */ @@ -1075,16 +1103,15 @@ index 000000000000..4fba5ee75a66 +MODULE_PARM_DESC(dgpu_power_susp, "dGPU power state to be set on exit (0: off / 1: on / 2: as-is, default: as-is)"); +MODULE_PARM_DESC(dtx_latch, "lock/unlock DTX base latch in accordance to power-state (Y/n)"); + -+ +static int dtx_cmd_simple(u8 cid) +{ + struct surface_sam_ssh_rqst rqst = { + .tc = SAM_DTX_TC, + .cid = cid, -+ .iid = 0, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, -+ .snc = 0, -+ .cdl = 0, ++ .iid = 0x00, ++ .chn = 0x01, ++ .snc = 0x00, ++ .cdl = 0x00, + .pld = NULL, + }; + @@ -1101,8 +1128,34 @@ index 000000000000..4fba5ee75a66 + return dtx_cmd_simple(SAM_DTX_CID_LATCH_UNLOCK); +} + ++static int shps_dgpu_dsm_get_pci_addr_from_adr(struct platform_device *pdev, const char *entry) { ++ acpi_handle handle = ACPI_HANDLE(&pdev->dev); ++ int status; ++ struct acpi_object_list input; ++ union acpi_object input_args[0]; ++ u64 device_addr; ++ u8 bus, dev, fun; + -+static int shps_dgpu_dsm_get_pci_addr(struct platform_device *pdev, const char *entry) ++ input.count = 0; ++ input.pointer = input_args; ++ ++ ++ status = acpi_evaluate_integer(handle, 
(acpi_string)entry, &input, &device_addr); ++ if (status) { ++ return -ENODEV; ++ } ++ ++ bus = 0; ++ dev = (device_addr & 0xFF0000) >> 16; ++ fun = device_addr & 0xFF; ++ ++ dev_info(&pdev->dev, "found pci device at bus = %d, dev = %x, fun = %x\n", ++ (u32)bus, (u32)dev, (u32)fun); ++ ++ return bus << 8 | PCI_DEVFN(dev, fun); ++} ++ ++static int shps_dgpu_dsm_get_pci_addr_from_dsm(struct platform_device *pdev, const char *entry) +{ + acpi_handle handle = ACPI_HANDLE(&pdev->dev); + union acpi_object *result; @@ -1113,6 +1166,7 @@ index 000000000000..4fba5ee75a66 + u8 bus, dev, fun; + int i; + ++ + result = acpi_evaluate_dsm_typed(handle, &SHPS_DSM_UUID, SHPS_DSM_REVISION, + SHPS_DSM_GPU_ADDRS, NULL, ACPI_TYPE_PACKAGE); + @@ -1148,6 +1202,7 @@ index 000000000000..4fba5ee75a66 + if (device_addr == 0) + return -ENODEV; + ++ + // convert address + bus = (device_addr & 0x0FF00000) >> 20; + dev = (device_addr & 0x000F8000) >> 15; @@ -1156,12 +1211,19 @@ index 000000000000..4fba5ee75a66 + return bus << 8 | PCI_DEVFN(dev, fun); +} + -+static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev, const char *entry) ++static struct pci_dev *shps_dgpu_dsm_get_pci_dev(struct platform_device *pdev) +{ ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); + struct pci_dev *dev; + int addr; + -+ addr = shps_dgpu_dsm_get_pci_addr(pdev, entry); ++ ++ if (drvdata->hardware_traits.dgpu_rp_pci_address) { ++ addr = shps_dgpu_dsm_get_pci_addr_from_adr(pdev, drvdata->hardware_traits.dgpu_rp_pci_address); ++ } else { ++ addr = shps_dgpu_dsm_get_pci_addr_from_dsm(pdev, SHPS_DSM_GPU_ADDRS_RP); ++ } ++ + if (addr < 0) + return ERR_PTR(addr); + @@ -1521,10 +1583,10 @@ index 000000000000..4fba5ee75a66 + pcie_capability_read_word(rp, PCI_EXP_SLTSTA, &sltsta); + pcie_capability_read_word(rp, PCI_EXP_SLTSTA2, &sltsta2); + -+ dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x", prefix, lnksta); -+ dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x", prefix, lnksta2); -+ dev_dbg(&pdev->dev, 
"%s: SLTSTA: 0x%04x", prefix, sltsta); -+ dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x", prefix, sltsta2); ++ dev_dbg(&pdev->dev, "%s: LNKSTA: 0x%04x\n", prefix, lnksta); ++ dev_dbg(&pdev->dev, "%s: LNKSTA2: 0x%04x\n", prefix, lnksta2); ++ dev_dbg(&pdev->dev, "%s: SLTSTA: 0x%04x\n", prefix, sltsta); ++ dev_dbg(&pdev->dev, "%s: SLTSTA2: 0x%04x\n", prefix, sltsta2); +} + +static void dbg_dump_drvsta(struct platform_device *pdev, const char *prefix) @@ -1532,14 +1594,13 @@ index 000000000000..4fba5ee75a66 + struct shps_driver_data *drvdata = platform_get_drvdata(pdev); + struct pci_dev *rp = drvdata->dgpu_root_port; + -+ dev_dbg(&pdev->dev, "%s: RP power: %d", prefix, rp->current_state); -+ dev_dbg(&pdev->dev, "%s: RP state saved: %d", prefix, rp->state_saved); -+ dev_dbg(&pdev->dev, "%s: RP state stored: %d", prefix, !!drvdata->dgpu_root_port_state); -+ dev_dbg(&pdev->dev, "%s: RP enabled: %d", prefix, atomic_read(&rp->enable_cnt)); -+ dev_dbg(&pdev->dev, "%s: RP mastered: %d", prefix, rp->is_busmaster); ++ dev_dbg(&pdev->dev, "%s: RP power: %d\n", prefix, rp->current_state); ++ dev_dbg(&pdev->dev, "%s: RP state saved: %d\n", prefix, rp->state_saved); ++ dev_dbg(&pdev->dev, "%s: RP state stored: %d\n", prefix, !!drvdata->dgpu_root_port_state); ++ dev_dbg(&pdev->dev, "%s: RP enabled: %d\n", prefix, atomic_read(&rp->enable_cnt)); ++ dev_dbg(&pdev->dev, "%s: RP mastered: %d\n", prefix, rp->is_busmaster); +} + -+ +static int shps_pm_prepare(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); @@ -1719,14 +1780,13 @@ index 000000000000..4fba5ee75a66 + mutex_unlock(&drvdata->lock); + + if (!test_bit(SHPS_STATE_BIT_PWRTGT, &drvdata->state)) { -+ dev_warn(&pdev->dev, "unexpected dGPU power-on detected"); ++ dev_warn(&pdev->dev, "unexpected dGPU power-on detected\n"); + // TODO: schedule state re-check and update + } + + return 0; +} + -+ +static int shps_dgpu_handle_rqsg(struct surface_sam_san_rqsg *rqsg, void *data) +{ + struct platform_device *pdev 
= data; @@ -1734,7 +1794,7 @@ index 000000000000..4fba5ee75a66 + if (rqsg->tc == SAM_DGPU_TC && rqsg->cid == SAM_DGPU_CID_POWERON) + return shps_dgpu_powered_on(pdev); + -+ dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)", ++ dev_warn(&pdev->dev, "unimplemented dGPU request: RQSG(0x%02x, 0x%02x, 0x%02x)\n", + rqsg->tc, rqsg->cid, rqsg->iid); + return 0; +} @@ -1858,7 +1918,7 @@ index 000000000000..4fba5ee75a66 +static int shps_gpios_setup_irq(struct platform_device *pdev) +{ + const int irqf_dgpu = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; -+ const int irqf_base = IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; ++ const int irqf_base = IRQF_SHARED; + struct shps_driver_data *drvdata = platform_get_drvdata(pdev); + int status; + @@ -1875,8 +1935,10 @@ index 000000000000..4fba5ee75a66 + status = request_irq(drvdata->irq_base_presence, + shps_base_presence_irq, irqf_base, + "shps_base_presence_irq", pdev); -+ if (status) ++ if (status) { ++ dev_err(&pdev->dev, "base irq failed: %d\n", status); + return status; ++ } + + status = request_threaded_irq(drvdata->irq_dgpu_presence, + NULL, shps_dgpu_presence_irq, irqf_dgpu, @@ -1897,29 +1959,100 @@ index 000000000000..4fba5ee75a66 + free_irq(drvdata->irq_dgpu_presence, pdev); +} + ++static void shps_sgcp_notify(acpi_handle device, u32 value, void *context) { ++ struct platform_device *pdev = context; ++ switch (value) { ++ case ACPI_SGCP_NOTIFY_POWER_ON: ++ shps_dgpu_powered_on(pdev); ++ } ++} ++ ++static int shps_start_sgcp_notification(struct platform_device *pdev, acpi_handle *sgpc_handle) { ++ acpi_handle handle; ++ int status; ++ ++ status = acpi_get_handle(NULL, "\\_SB.SGPC", &handle); ++ if (status) { ++ dev_err(&pdev->dev, "error in get_handle %d\n", status); ++ return status; ++ } ++ ++ status = acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify, pdev); ++ if (status) { ++ dev_err(&pdev->dev, "error in install notify 
%d\n", status); ++ *sgpc_handle = NULL; ++ return status; ++ } ++ ++ *sgpc_handle = handle; ++ return 0; ++} ++ ++static void shps_remove_sgcp_notification(struct platform_device *pdev) { ++ int status; ++ struct shps_driver_data *drvdata = platform_get_drvdata(pdev); ++ ++ if (drvdata->sgpc_handle) { ++ status = acpi_remove_notify_handler(drvdata->sgpc_handle, ACPI_DEVICE_NOTIFY, shps_sgcp_notify); ++ if (status) { ++ dev_err(&pdev->dev, "failed to remove notify handler: %d\n", status); ++ } ++ } ++} ++ ++static struct shps_hardware_traits shps_detect_hardware_traits(struct platform_device *pdev) { ++ const struct shps_hardware_probe *p; ++ ++ for (p = shps_hardware_probe_match; p->hardware_id; ++p) { ++ if (acpi_dev_present(p->hardware_id, NULL, -1)) { ++ break; ++ } ++ } ++ ++ dev_info(&pdev->dev, ++ "shps_detect_hardware_traits found device %s, generation %d\n", ++ p->hardware_id ? p->hardware_id : "SAN (default)", ++ p->generation); ++ ++ return *p->hardware_traits; ++} ++ +static int shps_probe(struct platform_device *pdev) +{ + struct acpi_device *shps_dev = ACPI_COMPANION(&pdev->dev); + struct shps_driver_data *drvdata; + struct device_link *link; + int power, status; ++ struct shps_hardware_traits detected_traits; + -+ if (gpiod_count(&pdev->dev, NULL) < 0) ++ if (gpiod_count(&pdev->dev, NULL) < 0) { ++ dev_err(&pdev->dev, "gpiod_count returned < 0\n"); + return -ENODEV; ++ } + + // link to SSH + status = surface_sam_ssh_consumer_register(&pdev->dev); -+ if (status) ++ if (status) { + return status == -ENXIO ? -EPROBE_DEFER : status; ++ } + -+ // link to SAN -+ status = surface_sam_san_consumer_register(&pdev->dev, 0); -+ if (status) -+ return status == -ENXIO ? 
-EPROBE_DEFER : status; ++ // detect what kind of hardware we're running ++ detected_traits = shps_detect_hardware_traits(pdev); ++ ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ // link to SAN ++ status = surface_sam_san_consumer_register(&pdev->dev, 0); ++ if (status) { ++ dev_err(&pdev->dev, "failed to register with san consumer: %d\n", status); ++ return status == -ENXIO ? -EPROBE_DEFER : status; ++ } ++ } + + status = acpi_dev_add_driver_gpios(shps_dev, shps_acpi_gpios); -+ if (status) ++ if (status) { ++ dev_err(&pdev->dev, "failed to add gpios: %d\n", status); + return status; ++ } + + drvdata = kzalloc(sizeof(struct shps_driver_data), GFP_KERNEL); + if (!drvdata) { @@ -1929,19 +2062,26 @@ index 000000000000..4fba5ee75a66 + mutex_init(&drvdata->lock); + platform_set_drvdata(pdev, drvdata); + -+ drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev, SHPS_DSM_GPU_ADDRS_RP); ++ drvdata->hardware_traits = detected_traits; ++ ++ drvdata->dgpu_root_port = shps_dgpu_dsm_get_pci_dev(pdev); + if (IS_ERR(drvdata->dgpu_root_port)) { + status = PTR_ERR(drvdata->dgpu_root_port); ++ dev_err(&pdev->dev, "failed to get pci dev: %d\n", status); + goto err_rp_lookup; + } + + status = shps_gpios_setup(pdev); -+ if (status) ++ if (status) { ++ dev_err(&pdev->dev, "unable to set up gpios, %d\n", status); + goto err_gpio; ++ } + + status = shps_gpios_setup_irq(pdev); -+ if (status) ++ if (status) { ++ dev_err(&pdev->dev, "unable to set up irqs %d\n", status); + goto err_gpio_irqs; ++ } + + status = device_add_groups(&pdev->dev, shps_power_groups); + if (status) @@ -1952,23 +2092,41 @@ index 000000000000..4fba5ee75a66 + if (!link) + goto err_devlink; + -+ surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev); ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ status = surface_sam_san_set_rqsg_handler(shps_dgpu_handle_rqsg, pdev); ++ if (status) { ++ dev_err(&pdev->dev, "unable to set SAN notification 
handler (%d)\n", status); ++ goto err_devlink; ++ } ++ } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ status = shps_start_sgcp_notification(pdev, &drvdata->sgpc_handle); ++ if (status) { ++ dev_err(&pdev->dev, "unable to install SGCP notification handler (%d)\n", status); ++ goto err_devlink; ++ } ++ } + + // if dGPU is not present turn-off root-port, else obey module param + status = shps_dgpu_is_present(pdev); + if (status < 0) -+ goto err_devlink; ++ goto err_post_notification; + + power = status == 0 ? SHPS_DGPU_POWER_OFF : param_dgpu_power_init; + if (power != SHPS_DGPU_MP_POWER_ASIS) { + status = shps_dgpu_set_power(pdev, power); + if (status) -+ goto err_devlink; ++ goto err_post_notification; + } + + device_init_wakeup(&pdev->dev, true); + return 0; + ++err_post_notification: ++ if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ shps_remove_sgcp_notification(pdev); ++ } else if (detected_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ surface_sam_san_set_rqsg_handler(NULL, NULL); ++ } +err_devlink: + device_remove_groups(&pdev->dev, shps_power_groups); +err_devattr: @@ -1998,7 +2156,12 @@ index 000000000000..4fba5ee75a66 + } + + device_set_wakeup_capable(&pdev->dev, false); -+ surface_sam_san_set_rqsg_handler(NULL, NULL); ++ ++ if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SGCP) { ++ shps_remove_sgcp_notification(pdev); ++ } else if (drvdata->hardware_traits.notification_method == SHPS_NOTIFICATION_METHOD_SAN) { ++ surface_sam_san_set_rqsg_handler(NULL, NULL); ++ } + device_remove_groups(&pdev->dev, shps_power_groups); + shps_gpios_remove_irq(pdev); + shps_gpios_remove(pdev); @@ -2030,22 +2193,23 @@ index 000000000000..4fba5ee75a66 + .shutdown = shps_shutdown, + .driver = { + .name = "surface_dgpu_hps", -+ .acpi_match_table = ACPI_PTR(shps_acpi_match), ++ .acpi_match_table = shps_acpi_match, + .pm = &shps_pm_ops, + }, +}; ++ 
+module_platform_driver(surface_sam_hps); + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Hot-Plug System (HPS) and dGPU power-state Driver for Surface Book 2"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.c b/drivers/platform/x86/surface_sam/surface_sam_san.c new file mode 100644 -index 000000000000..63478945e6b2 +index 0000000000000..11dd6daedc3dd --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_san.c -@@ -0,0 +1,883 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,913 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface ACPI Notify (SAN) and ACPI integration driver for SAM. + * Translates communication from ACPI to SSH and back. @@ -2073,15 +2237,11 @@ index 000000000000..63478945e6b2 +#define SAM_EVENT_DELAY_PWR_ADAPTER msecs_to_jiffies(5000) +#define SAM_EVENT_DELAY_PWR_BST msecs_to_jiffies(2500) + -+#define SAM_EVENT_PWR_TC 0x02 -+#define SAM_EVENT_PWR_RQID 0x0002 +#define SAM_EVENT_PWR_CID_BIX 0x15 +#define SAM_EVENT_PWR_CID_BST 0x16 +#define SAM_EVENT_PWR_CID_ADAPTER 0x17 +#define SAM_EVENT_PWR_CID_DPTF 0x4f + -+#define SAM_EVENT_TEMP_TC 0x03 -+#define SAM_EVENT_TEMP_RQID 0x0003 +#define SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT 0x0b + +#define SAN_RQST_TAG "surface_sam_san: rqst: " @@ -2114,7 +2274,16 @@ index 000000000000..63478945e6b2 +struct san_drvdata { + struct san_opreg_context opreg_ctx; + struct san_consumers consumers; -+ bool has_power_events; ++ ++ struct platform_device *dev; ++ struct ssam_event_notifier nf_bat; ++ struct ssam_event_notifier nf_tmp; ++}; ++ ++struct san_event_work { ++ struct delayed_work work; ++ struct platform_device *dev; ++ struct ssam_event event; // must be last +}; + +struct gsb_data_in { @@ -2124,7 +2293,7 @@ index 000000000000..63478945e6b2 +struct gsb_data_rqsx { + u8 cv; // command value (should be 0x01 or 0x03) + u8 tc; // target controller -+ u8 tid; // expected to be 0x01, could be 
revision ++ u8 tid; // transport channnel ID? + u8 iid; // target sub-controller (e.g. primary vs. secondary battery) + u8 snc; // expect-response-flag + u8 cid; // command ID @@ -2167,6 +2336,7 @@ index 000000000000..63478945e6b2 + SAN_PWR_EVENT_ADP1_INFO = 0x06, + SAN_PWR_EVENT_BAT2_STAT = 0x07, + SAN_PWR_EVENT_BAT2_INFO = 0x08, ++ SAN_PWR_EVENT_DPTF = 0x0A, +}; + + @@ -2243,14 +2413,23 @@ index 000000000000..63478945e6b2 +} + + ++static bool san_acpi_can_notify(struct device *dev, u64 func) ++{ ++ acpi_handle san = ACPI_HANDLE(dev); ++ return acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, 1 << func); ++} ++ +static int san_acpi_notify_power_event(struct device *dev, enum san_pwr_event event) +{ + acpi_handle san = ACPI_HANDLE(dev); + union acpi_object *obj; + ++ if (!san_acpi_can_notify(dev, event)) ++ return 0; ++ + dev_dbg(dev, "notify power event 0x%02x\n", event); + obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION, -+ (u8) event, NULL, ACPI_TYPE_BUFFER); ++ event, NULL, ACPI_TYPE_BUFFER); + + if (IS_ERR_OR_NULL(obj)) + return obj ? PTR_ERR(obj) : -ENXIO; @@ -2270,6 +2449,9 @@ index 000000000000..63478945e6b2 + union acpi_object *obj; + union acpi_object param; + ++ if (!san_acpi_can_notify(dev, SAN_DSM_FN_NOTIFY_SENSOR_TRIP_POINT)) ++ return 0; ++ + param.type = ACPI_TYPE_INTEGER; + param.integer.value = iid; + @@ -2290,15 +2472,13 @@ index 000000000000..63478945e6b2 +} + + -+static inline int san_evt_power_adapter(struct device *dev, struct surface_sam_ssh_event *event) ++static inline int san_evt_power_adapter(struct device *dev, const struct ssam_event *event) +{ + int status; + + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_ADP1_STAT); -+ if (status) { -+ dev_err(dev, "error handling power event (cid = %x)\n", event->cid); ++ if (status) + return status; -+ } + + /* + * Enusre that the battery states get updated correctly. 
@@ -2308,61 +2488,74 @@ index 000000000000..63478945e6b2 + */ + + status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT1_STAT); -+ if (status) { -+ dev_err(dev, "error handling power event (cid = %x)\n", event->cid); ++ if (status) + return status; -+ } + -+ status = san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT); -+ if (status) { -+ dev_err(dev, "error handling power event (cid = %x)\n", event->cid); -+ return status; -+ } -+ -+ return 0; ++ return san_acpi_notify_power_event(dev, SAN_PWR_EVENT_BAT2_STAT); +} + -+static inline int san_evt_power_bix(struct device *dev, struct surface_sam_ssh_event *event) ++static inline int san_evt_power_bix(struct device *dev, const struct ssam_event *event) +{ + enum san_pwr_event evcode; -+ int status; + -+ if (event->iid == 0x02) ++ if (event->instance_id == 0x02) + evcode = SAN_PWR_EVENT_BAT2_INFO; + else + evcode = SAN_PWR_EVENT_BAT1_INFO; + -+ status = san_acpi_notify_power_event(dev, evcode); -+ if (status) { -+ dev_err(dev, "error handling power event (cid = %x)\n", event->cid); -+ return status; -+ } -+ -+ return 0; ++ return san_acpi_notify_power_event(dev, evcode); +} + -+static inline int san_evt_power_bst(struct device *dev, struct surface_sam_ssh_event *event) ++static inline int san_evt_power_bst(struct device *dev, const struct ssam_event *event) +{ + enum san_pwr_event evcode; -+ int status; + -+ if (event->iid == 0x02) ++ if (event->instance_id == 0x02) + evcode = SAN_PWR_EVENT_BAT2_STAT; + else + evcode = SAN_PWR_EVENT_BAT1_STAT; + -+ status = san_acpi_notify_power_event(dev, evcode); -+ if (status) { -+ dev_err(dev, "error handling power event (cid = %x)\n", event->cid); -+ return status; ++ return san_acpi_notify_power_event(dev, evcode); ++} ++ ++static inline int san_evt_power_dptf(struct device *dev, const struct ssam_event *event) ++{ ++ union acpi_object payload; ++ acpi_handle san = ACPI_HANDLE(dev); ++ union acpi_object *obj; ++ ++ if (!san_acpi_can_notify(dev, SAN_PWR_EVENT_DPTF)) ++ 
return 0; ++ ++ /* ++ * The Surface ACPI expects a buffer and not a package. It specifically ++ * checks for ObjectType (Arg3) == 0x03. This will cause a warning in ++ * acpica/nsarguments.c, but this can safely be ignored. ++ */ ++ payload.type = ACPI_TYPE_BUFFER; ++ payload.buffer.length = event->length; ++ payload.buffer.pointer = (u8 *)&event->data[0]; ++ ++ dev_dbg(dev, "notify power event 0x%02x\n", event->command_id); ++ obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION, ++ SAN_PWR_EVENT_DPTF, &payload, ++ ACPI_TYPE_BUFFER); ++ ++ if (IS_ERR_OR_NULL(obj)) ++ return obj ? PTR_ERR(obj) : -ENXIO; ++ ++ if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) { ++ dev_err(dev, "got unexpected result from _DSM\n"); ++ return -EFAULT; + } + ++ ACPI_FREE(obj); + return 0; +} + -+static unsigned long san_evt_power_delay(struct surface_sam_ssh_event *event, void *data) ++static unsigned long san_evt_power_delay(u8 cid) +{ -+ switch (event->cid) { ++ switch (cid) { + case SAM_EVENT_PWR_CID_ADAPTER: + /* + * Wait for battery state to update before signalling adapter change. @@ -2382,65 +2575,109 @@ index 000000000000..63478945e6b2 + } +} + -+static int san_evt_power(struct surface_sam_ssh_event *event, void *data) -+{ -+ struct device *dev = (struct device *)data; -+ -+ switch (event->cid) { -+ case SAM_EVENT_PWR_CID_BIX: -+ return san_evt_power_bix(dev, event); -+ -+ case SAM_EVENT_PWR_CID_BST: -+ return san_evt_power_bst(dev, event); -+ -+ case SAM_EVENT_PWR_CID_ADAPTER: -+ return san_evt_power_adapter(dev, event); -+ -+ case SAM_EVENT_PWR_CID_DPTF: -+ /* -+ * Ignored for now. -+ * This signals a change in Intel DPTF PMAX, and possibly other -+ * fields. Ignore for now as there is no corresponding _DSM call and -+ * DPTF is implemented via a separate INT3407 device. -+ * -+ * The payload of this event is: [u32 PMAX, unknown...]. 
-+ */ -+ return 0; -+ -+ default: -+ dev_warn(dev, "unhandled power event (cid = %x)\n", event->cid); -+ } -+ -+ return 0; -+} -+ -+ -+static inline int san_evt_thermal_notify(struct device *dev, struct surface_sam_ssh_event *event) ++static bool san_evt_power(const struct ssam_event *event, struct device *dev) +{ + int status; + -+ status = san_acpi_notify_sensor_trip_point(dev, event->iid); -+ if (status) { -+ dev_err(dev, "error handling thermal event (cid = %x)\n", event->cid); -+ return status; -+ } ++ switch (event->command_id) { ++ case SAM_EVENT_PWR_CID_BIX: ++ status = san_evt_power_bix(dev, event); ++ break; + -+ return 0; -+} ++ case SAM_EVENT_PWR_CID_BST: ++ status = san_evt_power_bst(dev, event); ++ break; + -+static int san_evt_thermal(struct surface_sam_ssh_event *event, void *data) -+{ -+ struct device *dev = (struct device *)data; ++ case SAM_EVENT_PWR_CID_ADAPTER: ++ status = san_evt_power_adapter(dev, event); ++ break; + -+ switch (event->cid) { -+ case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT: -+ return san_evt_thermal_notify(dev, event); ++ case SAM_EVENT_PWR_CID_DPTF: ++ status = san_evt_power_dptf(dev, event); ++ break; + + default: -+ dev_warn(dev, "unhandled thermal event (cid = %x)\n", event->cid); ++ return false; + } + -+ return 0; ++ if (status) ++ dev_err(dev, "error handling power event (cid = %x)\n", ++ event->command_id); ++ ++ return true; ++} ++ ++static void san_evt_power_workfn(struct work_struct *work) ++{ ++ struct san_event_work *ev = container_of(work, struct san_event_work, work.work); ++ ++ san_evt_power(&ev->event, &ev->dev->dev); ++ kfree(ev); ++} ++ ++ ++static u32 san_evt_power_nb(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct san_drvdata *drvdata = container_of(nb, struct san_drvdata, nf_bat.base); ++ struct san_event_work *work; ++ unsigned long delay = san_evt_power_delay(event->command_id); ++ ++ if (delay == 0) { ++ if (san_evt_power(event, &drvdata->dev->dev)) ++ return 
SSAM_NOTIF_HANDLED; ++ else ++ return 0; ++ } ++ ++ work = kzalloc(sizeof(struct san_event_work) + event->length, GFP_KERNEL); ++ if (!work) ++ return ssam_notifier_from_errno(-ENOMEM); ++ ++ INIT_DELAYED_WORK(&work->work, san_evt_power_workfn); ++ work->dev = drvdata->dev; ++ ++ memcpy(&work->event, event, sizeof(struct ssam_event) + event->length); ++ ++ schedule_delayed_work(&work->work, delay); ++ return SSAM_NOTIF_HANDLED; ++} ++ ++ ++static inline int san_evt_thermal_notify(struct device *dev, const struct ssam_event *event) ++{ ++ return san_acpi_notify_sensor_trip_point(dev, event->instance_id); ++} ++ ++static bool san_evt_thermal(const struct ssam_event *event, struct device *dev) ++{ ++ int status; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_TEMP_CID_NOTIFY_SENSOR_TRIP_POINT: ++ status = san_evt_thermal_notify(dev, event); ++ break; ++ ++ default: ++ return false; ++ } ++ ++ if (status) { ++ dev_err(dev, "error handling thermal event (cid = %x)\n", ++ event->command_id); ++ } ++ ++ return true; ++} ++ ++static u32 san_evt_thermal_nb(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct san_drvdata *drvdata = container_of(nb, struct san_drvdata, nf_tmp.base); ++ struct platform_device *pdev = drvdata->dev; ++ ++ if (san_evt_thermal(event, &pdev->dev)) ++ return SSAM_NOTIF_HANDLED; ++ else ++ return 0; +} + + @@ -2506,7 +2743,7 @@ index 000000000000..63478945e6b2 + rqst.tc = gsb_rqst->tc; + rqst.cid = gsb_rqst->cid; + rqst.iid = gsb_rqst->iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = gsb_rqst->tid; + rqst.snc = gsb_rqst->snc; + rqst.cdl = gsb_rqst->cdl; + rqst.pld = &gsb_rqst->pld[0]; @@ -2638,92 +2875,42 @@ index 000000000000..63478945e6b2 + return AE_OK; +} + -+static int san_enable_power_events(struct platform_device *pdev) -+{ -+ int status; -+ -+ status = surface_sam_ssh_set_delayed_event_handler( -+ SAM_EVENT_PWR_RQID, san_evt_power, -+ san_evt_power_delay, &pdev->dev); -+ if (status) -+ return status; 
-+ -+ status = surface_sam_ssh_enable_event_source(SAM_EVENT_PWR_TC, 0x01, SAM_EVENT_PWR_RQID); -+ if (status) { -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_PWR_RQID); -+ return status; -+ } -+ -+ return 0; -+} -+ -+static int san_enable_thermal_events(struct platform_device *pdev) -+{ -+ int status; -+ -+ status = surface_sam_ssh_set_event_handler( -+ SAM_EVENT_TEMP_RQID, san_evt_thermal, -+ &pdev->dev); -+ if (status) -+ return status; -+ -+ status = surface_sam_ssh_enable_event_source(SAM_EVENT_TEMP_TC, 0x01, SAM_EVENT_TEMP_RQID); -+ if (status) { -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_TEMP_RQID); -+ return status; -+ } -+ -+ return 0; -+} -+ -+static void san_disable_power_events(void) -+{ -+ surface_sam_ssh_disable_event_source(SAM_EVENT_PWR_TC, 0x01, SAM_EVENT_PWR_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_PWR_RQID); -+} -+ -+static void san_disable_thermal_events(void) -+{ -+ surface_sam_ssh_disable_event_source(SAM_EVENT_TEMP_TC, 0x01, SAM_EVENT_TEMP_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_TEMP_RQID); -+} -+ -+ -+static int san_enable_events(struct platform_device *pdev) ++static int san_events_register(struct platform_device *pdev) +{ + struct san_drvdata *drvdata = platform_get_drvdata(pdev); + int status; + -+ status = san_enable_thermal_events(pdev); ++ drvdata->nf_bat.base.priority = 1; ++ drvdata->nf_bat.base.fn = san_evt_power_nb; ++ drvdata->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ drvdata->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT; ++ drvdata->nf_bat.event.id.instance = 0; ++ drvdata->nf_bat.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ drvdata->nf_tmp.base.priority = 1; ++ drvdata->nf_tmp.base.fn = san_evt_thermal_nb; ++ drvdata->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ drvdata->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP; ++ drvdata->nf_tmp.event.id.instance = 0; ++ drvdata->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = 
surface_sam_ssh_notifier_register(&drvdata->nf_bat); + if (status) + return status; + -+ /* -+ * We have to figure out if this device uses SAN or requires a separate -+ * driver for the battery. If it uses the separate driver, that driver -+ * will enable and handle power events. -+ */ -+ drvdata->has_power_events = acpi_has_method(NULL, "\\_SB.BAT1._BST"); -+ if (drvdata->has_power_events) { -+ status = san_enable_power_events(pdev); -+ if (status) -+ goto err; -+ } ++ status = surface_sam_ssh_notifier_register(&drvdata->nf_tmp); ++ if (status) ++ surface_sam_ssh_notifier_unregister(&drvdata->nf_bat); + -+ return 0; -+ -+err: -+ san_disable_thermal_events(); + return status; +} + -+static void san_disable_events(struct platform_device *pdev) ++static void san_events_unregister(struct platform_device *pdev) +{ + struct san_drvdata *drvdata = platform_get_drvdata(pdev); + -+ san_disable_thermal_events(); -+ if (drvdata->has_power_events) -+ san_disable_power_events(); ++ surface_sam_ssh_notifier_unregister(&drvdata->nf_bat); ++ surface_sam_ssh_notifier_unregister(&drvdata->nf_tmp); +} + + @@ -2831,6 +3018,7 @@ index 000000000000..63478945e6b2 + if (!drvdata) + return -ENOMEM; + ++ drvdata->dev = pdev; + drvdata->opreg_ctx.dev = &pdev->dev; + + cons = acpi_device_get_match_data(&pdev->dev); @@ -2850,7 +3038,7 @@ index 000000000000..63478945e6b2 + goto err_install_handler; + } + -+ status = san_enable_events(pdev); ++ status = san_events_register(pdev); + if (status) + goto err_enable_events; + @@ -2868,7 +3056,7 @@ index 000000000000..63478945e6b2 + return 0; + +err_install_dev: -+ san_disable_events(pdev); ++ san_events_unregister(pdev); +err_enable_events: + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler); +err_install_handler: @@ -2890,7 +3078,13 @@ index 000000000000..63478945e6b2 + mutex_unlock(&rqsg_if.lock); + + acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS, &san_opreg_handler); -+ san_disable_events(pdev); ++ 
san_events_unregister(pdev); ++ ++ /* ++ * We have unregistered our event sources. Now we need to ensure that ++ * all delayed works they may have spawned are run to completion. ++ */ ++ flush_scheduled_work(); + + san_consumers_unlink(&drvdata->consumers); + kfree(drvdata); @@ -2919,7 +3113,7 @@ index 000000000000..63478945e6b2 + .remove = surface_sam_san_remove, + .driver = { + .name = "surface_sam_san", -+ .acpi_match_table = ACPI_PTR(surface_sam_san_match), ++ .acpi_match_table = surface_sam_san_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; @@ -2927,14 +3121,14 @@ index 000000000000..63478945e6b2 + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface ACPI Notify Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_san.h b/drivers/platform/x86/surface_sam/surface_sam_san.h new file mode 100644 -index 000000000000..85b6d6569947 +index 0000000000000..2b9dee159bbbc --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_san.h @@ -0,0 +1,30 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Interface for Surface ACPI/Notify (SAN). + * @@ -2966,11 +3160,11 @@ index 000000000000..85b6d6569947 +#endif /* _SURFACE_SAM_SAN_H */ diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid.c b/drivers/platform/x86/surface_sam/surface_sam_sid.c new file mode 100644 -index 000000000000..fb49d0e00808 +index 0000000000000..53c90d8924e28 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_sid.c -@@ -0,0 +1,137 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,147 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface Integration Driver. + * MFD driver to provide device/model dependent functionality. 
@@ -2989,6 +3183,12 @@ index 000000000000..fb49d0e00808 + { }, +}; + ++static const struct mfd_cell sid_devs_sp6[] = { ++ { .name = "surface_sam_sid_gpelid", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, ++ { }, ++}; ++ +static const struct mfd_cell sid_devs_sp7[] = { + { .name = "surface_sam_sid_gpelid", .id = -1 }, + { .name = "surface_sam_sid_ac", .id = -1 }, @@ -3028,9 +3228,10 @@ index 000000000000..fb49d0e00808 +}; + +static const struct mfd_cell sid_devs_sl3_15[] = { -+ { .name = "surface_sam_sid_vhf", .id = -1 }, -+ { .name = "surface_sam_sid_ac", .id = -1 }, -+ { .name = "surface_sam_sid_battery", .id = -1 }, ++ { .name = "surface_sam_sid_vhf", .id = -1 }, ++ { .name = "surface_sam_sid_ac", .id = -1 }, ++ { .name = "surface_sam_sid_battery", .id = -1 }, ++ { .name = "surface_sam_sid_perfmode", .id = -1 }, + { }, +}; + @@ -3038,6 +3239,9 @@ index 000000000000..fb49d0e00808 + /* Surface Pro 4, 5, and 6 */ + { "MSHW0081", (unsigned long)sid_devs_sp4 }, + ++ /* Surface Pro 6 (OMBR >= 0x10) */ ++ { "MSHW0111", (unsigned long)sid_devs_sp6 }, ++ + /* Surface Pro 7 */ + { "MSHW0116", (unsigned long)sid_devs_sp7 }, + @@ -3098,7 +3302,7 @@ index 000000000000..fb49d0e00808 + .remove = surface_sam_sid_remove, + .driver = { + .name = "surface_sam_sid", -+ .acpi_match_table = ACPI_PTR(surface_sam_sid_match), ++ .acpi_match_table = surface_sam_sid_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; @@ -3106,14 +3310,14 @@ index 000000000000..fb49d0e00808 + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Integration Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c new file mode 100644 -index 000000000000..286411701d36 +index 0000000000000..798184bfd8573 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_gpelid.c @@ -0,0 +1,224 @@ -+// 
SPDX-License-Identifier: GPL-2.0 ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface Lid driver to enable wakeup from suspend via the lid. + */ @@ -3335,15 +3539,15 @@ index 000000000000..286411701d36 + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Lid Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:surface_sam_sid_gpelid"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c new file mode 100644 -index 000000000000..f74e2b51604d +index 0000000000000..2e11efb166f2b --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_perfmode.c @@ -0,0 +1,216 @@ -+// SPDX-License-Identifier: GPL-2.0 ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface Performance Mode Driver. + * Allows to change cooling capabilities based on user preference. @@ -3390,7 +3594,7 @@ index 000000000000..f74e2b51604d + .tc = 0x03, + .cid = 0x02, + .iid = 0x00, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, ++ .chn = 0x01, + .snc = 0x01, + .cdl = 0x00, + .pld = NULL, @@ -3420,7 +3624,7 @@ index 000000000000..f74e2b51604d + .tc = 0x03, + .cid = 0x03, + .iid = 0x00, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, ++ .chn = 0x01, + .snc = 0x00, + .cdl = ARRAY_SIZE(payload), + .pld = payload, @@ -3470,7 +3674,7 @@ index 000000000000..f74e2b51604d + + perf_mode = surface_sam_perf_mode_get(); + if (perf_mode < 0) { -+ dev_err(dev, "failed to get current performance mode: %d", perf_mode); ++ dev_err(dev, "failed to get current performance mode: %d\n", perf_mode); + return -EIO; + } + @@ -3507,7 +3711,7 @@ index 000000000000..f74e2b51604d + return count; +} + -+const static DEVICE_ATTR_RW(perf_mode); ++static const DEVICE_ATTR_RW(perf_mode); + + +static int surface_sam_sid_perfmode_probe(struct platform_device *pdev) @@ -3557,15 +3761,15 @@ index 000000000000..f74e2b51604d + +MODULE_AUTHOR("Maximilian Luz "); 
+MODULE_DESCRIPTION("Surface Performance Mode Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:surface_sam_sid_perfmode"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_power.c b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c new file mode 100644 -index 000000000000..eb925bdda883 +index 0000000000000..d7844d52ddb6f --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_power.c -@@ -0,0 +1,1264 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,1146 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface SID Battery/AC Driver. + * Provides support for the battery and AC on 7th generation Surface devices. @@ -3602,7 +3806,6 @@ index 000000000000..eb925bdda883 + */ + +#define SAM_PWR_TC 0x02 -+#define SAM_PWR_RQID 0x0002 + +#define SAM_RQST_PWR_CID_STA 0x01 +#define SAM_RQST_PWR_CID_BIX 0x02 @@ -3618,7 +3821,6 @@ index 000000000000..eb925bdda883 +#define SAM_EVENT_PWR_CID_BIX 0x15 +#define SAM_EVENT_PWR_CID_BST 0x16 +#define SAM_EVENT_PWR_CID_ADAPTER 0x17 -+#define SAM_EVENT_PWR_CID_DPTF 0x4f + +#define SAM_BATTERY_STA_OK 0x0f +#define SAM_BATTERY_STA_PRESENT 0x10 @@ -3679,7 +3881,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_STA; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3700,7 +3902,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_BIX; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3721,7 +3923,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_BST; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3741,7 +3943,7 @@ index 
000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_BTP; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x00; + rqst.cdl = sizeof(u32); + rqst.pld = (u8 *)&btp; @@ -3758,7 +3960,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_PSRC; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3780,7 +3982,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_PMAX; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3802,7 +4004,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_ARTG; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3824,7 +4026,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_PSOC; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x01; + rqst.cdl = 0x00; + rqst.pld = NULL; @@ -3845,7 +4047,7 @@ index 000000000000..eb925bdda883 + rqst.tc = SAM_PWR_TC; + rqst.cid = SAM_RQST_PWR_CID_CHGI; + rqst.iid = iid; -+ rqst.pri = SURFACE_SAM_PRIORITY_NORMAL; ++ rqst.chn = 0x01; + rqst.snc = 0x00; + rqst.cdl = sizeof(u32); + rqst.pld = (u8 *)&chgi; @@ -3858,16 +4060,11 @@ index 000000000000..eb925bdda883 + * Common Power-Subsystem Interface. 
+ */ + -+enum spwr_battery_id { -+ SPWR_BAT1, -+ SPWR_BAT2, -+ __SPWR_NUM_BAT, -+}; +#define SPWR_BAT_SINGLE PLATFORM_DEVID_NONE + +struct spwr_battery_device { + struct platform_device *pdev; -+ enum spwr_battery_id id; ++ u8 iid; + + char name[32]; + struct power_supply *psy; @@ -3875,6 +4072,8 @@ index 000000000000..eb925bdda883 + + struct delayed_work update_work; + ++ struct ssam_event_notifier notif; ++ + struct mutex lock; + unsigned long timestamp; + @@ -3891,23 +4090,13 @@ index 000000000000..eb925bdda883 + struct power_supply *psy; + struct power_supply_desc psy_desc; + ++ struct ssam_event_notifier notif; ++ + struct mutex lock; + + u32 state; +}; + -+struct spwr_subsystem { -+ struct mutex lock; -+ -+ unsigned int refcount; -+ struct spwr_ac_device *ac; -+ struct spwr_battery_device *battery[__SPWR_NUM_BAT]; -+}; -+ -+static struct spwr_subsystem spwr_subsystem = { -+ .lock = __MUTEX_INITIALIZER(spwr_subsystem.lock), -+}; -+ +static enum power_supply_property spwr_ac_props[] = { + POWER_SUPPLY_PROP_ONLINE, +}; @@ -3949,10 +4138,9 @@ index 000000000000..eb925bdda883 +}; + + -+static int spwr_battery_register(struct spwr_battery_device *bat, struct platform_device *pdev, -+ enum spwr_battery_id id); ++static int spwr_battery_register(struct spwr_battery_device *bat, struct platform_device *pdev, int iid); + -+static int spwr_battery_unregister(struct spwr_battery_device *bat); ++static void spwr_battery_unregister(struct spwr_battery_device *bat); + + +static inline bool spwr_battery_present(struct spwr_battery_device *bat) @@ -3963,7 +4151,7 @@ index 000000000000..eb925bdda883 + +static inline int spwr_battery_load_sta(struct spwr_battery_device *bat) +{ -+ return sam_psy_get_sta(bat->id + 1, &bat->sta); ++ return sam_psy_get_sta(bat->iid, &bat->sta); +} + +static inline int spwr_battery_load_bix(struct spwr_battery_device *bat) @@ -3971,7 +4159,7 @@ index 000000000000..eb925bdda883 + if (!spwr_battery_present(bat)) + return 0; + -+ return 
sam_psy_get_bix(bat->id + 1, &bat->bix); ++ return sam_psy_get_bix(bat->iid, &bat->bix); +} + +static inline int spwr_battery_load_bst(struct spwr_battery_device *bat) @@ -3979,14 +4167,14 @@ index 000000000000..eb925bdda883 + if (!spwr_battery_present(bat)) + return 0; + -+ return sam_psy_get_bst(bat->id + 1, &bat->bst); ++ return sam_psy_get_bst(bat->iid, &bat->bst); +} + + +static inline int spwr_battery_set_alarm_unlocked(struct spwr_battery_device *bat, u32 value) +{ + bat->alarm = value; -+ return sam_psy_set_btp(bat->id + 1, bat->alarm); ++ return sam_psy_set_btp(bat->iid, bat->alarm); +} + +static inline int spwr_battery_set_alarm(struct spwr_battery_device *bat, u32 value) @@ -4098,84 +4286,38 @@ index 000000000000..eb925bdda883 + + // if the unit has changed, re-add the battery + if (unit != bat->bix.power_unit) { -+ mutex_unlock(&spwr_subsystem.lock); -+ -+ status = spwr_battery_unregister(bat); -+ if (status) -+ return status; -+ -+ status = spwr_battery_register(bat, bat->pdev, bat->id); ++ spwr_battery_unregister(bat); ++ status = spwr_battery_register(bat, bat->pdev, bat->iid); + } + + return status; +} + + -+static int spwr_handle_event_bix(struct surface_sam_ssh_event *event) ++static inline int spwr_notify_bix(struct spwr_battery_device *bat) +{ -+ struct spwr_battery_device *bat; -+ enum spwr_battery_id bat_id = event->iid - 1; -+ int status = 0; ++ int status; + -+ if (bat_id < 0 || bat_id >= __SPWR_NUM_BAT) { -+ printk(SPWR_WARN "invalid BIX event iid 0x%02x\n", event->iid); -+ bat_id = SPWR_BAT1; -+ } ++ status = spwr_battery_recheck(bat); ++ if (!status) ++ power_supply_changed(bat->psy); + -+ mutex_lock(&spwr_subsystem.lock); -+ bat = spwr_subsystem.battery[bat_id]; -+ if (bat) { -+ status = spwr_battery_recheck(bat); -+ if (!status) -+ power_supply_changed(bat->psy); -+ } -+ -+ mutex_unlock(&spwr_subsystem.lock); + return status; +} + -+static int spwr_handle_event_bst(struct surface_sam_ssh_event *event) ++static inline int 
spwr_notify_bst(struct spwr_battery_device *bat) +{ -+ struct spwr_battery_device *bat; -+ enum spwr_battery_id bat_id = event->iid - 1; -+ int status = 0; ++ int status; + -+ if (bat_id < 0 || bat_id >= __SPWR_NUM_BAT) { -+ printk(SPWR_WARN "invalid BST event iid 0x%02x\n", event->iid); -+ bat_id = SPWR_BAT1; -+ } ++ status = spwr_battery_update_bst(bat, false); ++ if (!status) ++ power_supply_changed(bat->psy); + -+ mutex_lock(&spwr_subsystem.lock); -+ -+ bat = spwr_subsystem.battery[bat_id]; -+ if (bat) { -+ status = spwr_battery_update_bst(bat, false); -+ if (!status) -+ power_supply_changed(bat->psy); -+ } -+ -+ mutex_unlock(&spwr_subsystem.lock); + return status; +} + -+static int spwr_handle_event_adapter(struct surface_sam_ssh_event *event) ++static inline int spwr_notify_adapter_bat(struct spwr_battery_device *bat) +{ -+ struct spwr_battery_device *bat1 = NULL; -+ struct spwr_battery_device *bat2 = NULL; -+ struct spwr_ac_device *ac; -+ int status = 0; -+ -+ mutex_lock(&spwr_subsystem.lock); -+ -+ ac = spwr_subsystem.ac; -+ if (ac) { -+ status = spwr_ac_update(ac); -+ if (status) -+ goto out; -+ -+ power_supply_changed(ac->psy); -+ } -+ + /* + * Handle battery update quirk: + * When the battery is fully charged and the adapter is plugged in or @@ -4184,43 +4326,73 @@ index 000000000000..eb925bdda883 + * the state is updated on the battery. Schedule an update to solve this. 
+ */ + -+ bat1 = spwr_subsystem.battery[SPWR_BAT1]; -+ if (bat1 && bat1->bst.remaining_cap >= bat1->bix.last_full_charge_cap) -+ schedule_delayed_work(&bat1->update_work, SPWR_AC_BAT_UPDATE_DELAY); ++ if (bat->bst.remaining_cap >= bat->bix.last_full_charge_cap) ++ schedule_delayed_work(&bat->update_work, SPWR_AC_BAT_UPDATE_DELAY); + -+ bat2 = spwr_subsystem.battery[SPWR_BAT2]; -+ if (bat2 && bat2->bst.remaining_cap >= bat2->bix.last_full_charge_cap) -+ schedule_delayed_work(&bat2->update_work, SPWR_AC_BAT_UPDATE_DELAY); ++ return 0; ++} ++ ++static inline int spwr_notify_adapter_ac(struct spwr_ac_device *ac) ++{ ++ int status; ++ ++ status = spwr_ac_update(ac); ++ if (!status) ++ power_supply_changed(ac->psy); + -+out: -+ mutex_unlock(&spwr_subsystem.lock); + return status; +} + -+static int spwr_handle_event_dptf(struct surface_sam_ssh_event *event) ++static u32 spwr_notify_bat(struct ssam_notifier_block *nb, const struct ssam_event *event) +{ -+ return 0; // TODO: spwr_handle_event_dptf -+} ++ struct spwr_battery_device *bat = container_of(nb, struct spwr_battery_device, notif.base); ++ int status; + -+static int spwr_handle_event(struct surface_sam_ssh_event *event, void *data) -+{ -+ printk(SPWR_DEBUG "power event (cid = 0x%02x)\n", event->cid); ++ dev_dbg(&bat->pdev->dev, "power event (cid = 0x%02x)\n", event->command_id); + -+ switch (event->cid) { ++ // handled here because adapter has IID = 0 ++ if (event->command_id == SAM_EVENT_PWR_CID_ADAPTER) { ++ status = spwr_notify_adapter_bat(bat); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++ } ++ ++ // check for the correct battery IID ++ if (event->instance_id != bat->iid) ++ return 0; ++ ++ switch (event->command_id) { + case SAM_EVENT_PWR_CID_BIX: -+ return spwr_handle_event_bix(event); ++ status = spwr_notify_bix(bat); ++ break; + + case SAM_EVENT_PWR_CID_BST: -+ return spwr_handle_event_bst(event); -+ -+ case SAM_EVENT_PWR_CID_ADAPTER: -+ return spwr_handle_event_adapter(event); -+ -+ 
case SAM_EVENT_PWR_CID_DPTF: -+ return spwr_handle_event_dptf(event); ++ status = spwr_notify_bst(bat); ++ break; ++ ++ default: ++ return 0; ++ } ++ ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; ++} ++ ++static u32 spwr_notify_ac(struct ssam_notifier_block *nb, const struct ssam_event *event) ++{ ++ struct spwr_ac_device *ac = container_of(nb, struct spwr_ac_device, notif.base); ++ int status; ++ ++ dev_dbg(&ac->pdev->dev, "power event (cid = 0x%02x)\n", event->command_id); ++ ++ // AC has IID = 0 ++ if (event->instance_id != 0) ++ return 0; ++ ++ switch (event->command_id) { ++ case SAM_EVENT_PWR_CID_ADAPTER: ++ status = spwr_notify_adapter_ac(ac); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; + + default: -+ printk(SPWR_WARN "unhandled power event (cid = 0x%02x)\n", event->cid); + return 0; + } +} @@ -4462,58 +4634,6 @@ index 000000000000..eb925bdda883 +}; + + -+static int spwr_subsys_init_unlocked(void) -+{ -+ int status; -+ -+ status = surface_sam_ssh_set_event_handler(SAM_PWR_RQID, spwr_handle_event, NULL); -+ if (status) -+ goto err_handler; -+ -+ status = surface_sam_ssh_enable_event_source(SAM_PWR_TC, 0x01, SAM_PWR_RQID); -+ if (status) -+ goto err_source; -+ -+ return 0; -+ -+err_source: -+ surface_sam_ssh_remove_event_handler(SAM_PWR_RQID); -+err_handler: -+ return status; -+} -+ -+static int spwr_subsys_deinit_unlocked(void) -+{ -+ surface_sam_ssh_disable_event_source(SAM_PWR_TC, 0x01, SAM_PWR_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_PWR_RQID); -+ return 0; -+} -+ -+static inline int spwr_subsys_ref_unlocked(void) -+{ -+ int status = 0; -+ -+ if (!spwr_subsystem.refcount) -+ status = spwr_subsys_init_unlocked(); -+ -+ spwr_subsystem.refcount += 1; -+ return status; -+} -+ -+static inline int spwr_subsys_unref_unlocked(void) -+{ -+ int status = 0; -+ -+ if (spwr_subsystem.refcount) -+ spwr_subsystem.refcount -= 1; -+ -+ if (!spwr_subsystem.refcount) -+ status = spwr_subsys_deinit_unlocked(); -+ -+ return 
status; -+} -+ -+ +static int spwr_ac_register(struct spwr_ac_device *ac, struct platform_device *pdev) +{ + struct power_supply_config psy_cfg = {}; @@ -4541,69 +4661,51 @@ index 000000000000..eb925bdda883 + ac->psy_desc.num_properties = ARRAY_SIZE(spwr_ac_props); + ac->psy_desc.get_property = spwr_ac_get_property; + -+ mutex_lock(&spwr_subsystem.lock); -+ if (spwr_subsystem.ac) { -+ status = -EEXIST; -+ goto err; -+ } -+ -+ status = spwr_subsys_ref_unlocked(); -+ if (status) -+ goto err; -+ + ac->psy = power_supply_register(&ac->pdev->dev, &ac->psy_desc, &psy_cfg); + if (IS_ERR(ac->psy)) { + status = PTR_ERR(ac->psy); -+ goto err_unref; ++ goto err_psy; + } + -+ spwr_subsystem.ac = ac; -+ mutex_unlock(&spwr_subsystem.lock); ++ ac->notif.base.priority = 1; ++ ac->notif.base.fn = spwr_notify_ac; ++ ac->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ ac->notif.event.id.target_category = SSAM_SSH_TC_BAT; ++ ac->notif.event.id.instance = 0; ++ ac->notif.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = surface_sam_ssh_notifier_register(&ac->notif); ++ if (status) ++ goto err_notif; ++ + return 0; + -+err_unref: -+ spwr_subsys_unref_unlocked(); -+err: -+ mutex_unlock(&spwr_subsystem.lock); ++err_notif: ++ power_supply_unregister(ac->psy); ++err_psy: + mutex_destroy(&ac->lock); + return status; +} + +static int spwr_ac_unregister(struct spwr_ac_device *ac) +{ -+ int status; -+ -+ mutex_lock(&spwr_subsystem.lock); -+ if (spwr_subsystem.ac != ac) { -+ mutex_unlock(&spwr_subsystem.lock); -+ return -EINVAL; -+ } -+ -+ spwr_subsystem.ac = NULL; ++ surface_sam_ssh_notifier_unregister(&ac->notif); + power_supply_unregister(ac->psy); -+ -+ status = spwr_subsys_unref_unlocked(); -+ mutex_unlock(&spwr_subsystem.lock); -+ + mutex_destroy(&ac->lock); -+ return status; ++ return 0; +} + -+static int spwr_battery_register(struct spwr_battery_device *bat, struct platform_device *pdev, -+ enum spwr_battery_id id) ++static int spwr_battery_register(struct spwr_battery_device *bat, struct 
platform_device *pdev, int iid) +{ + struct power_supply_config psy_cfg = {}; + u32 sta; + int status; + -+ if ((id < 0 || id >= __SPWR_NUM_BAT) && id != SPWR_BAT_SINGLE) -+ return -EINVAL; -+ + bat->pdev = pdev; -+ bat->id = id != SPWR_BAT_SINGLE ? id : SPWR_BAT1; ++ bat->iid = iid != SPWR_BAT_SINGLE ? iid : 1; + + // make sure the device is there and functioning properly -+ status = sam_psy_get_sta(bat->id + 1, &sta); ++ status = sam_psy_get_sta(bat->iid, &sta); + if (status) + return status; + @@ -4620,7 +4722,7 @@ index 000000000000..eb925bdda883 + return status; + } + -+ snprintf(bat->name, ARRAY_SIZE(bat->name), "BAT%d", bat->id); ++ snprintf(bat->name, ARRAY_SIZE(bat->name), "BAT%d", bat->iid - 1); + bat->psy_desc.name = bat->name; + bat->psy_desc.type = POWER_SUPPLY_TYPE_BATTERY; + @@ -4639,63 +4741,45 @@ index 000000000000..eb925bdda883 + + INIT_DELAYED_WORK(&bat->update_work, spwr_battery_update_bst_workfn); + -+ mutex_lock(&spwr_subsystem.lock); -+ if (spwr_subsystem.battery[bat->id]) { -+ status = -EEXIST; -+ goto err; -+ } -+ -+ status = spwr_subsys_ref_unlocked(); -+ if (status) -+ goto err; -+ + bat->psy = power_supply_register(&bat->pdev->dev, &bat->psy_desc, &psy_cfg); + if (IS_ERR(bat->psy)) { + status = PTR_ERR(bat->psy); -+ goto err_unref; ++ goto err_psy; + } + ++ bat->notif.base.priority = 1; ++ bat->notif.base.fn = spwr_notify_bat; ++ bat->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ bat->notif.event.id.target_category = SSAM_SSH_TC_BAT; ++ bat->notif.event.id.instance = 0; ++ bat->notif.event.flags = SSAM_EVENT_SEQUENCED; ++ ++ status = surface_sam_ssh_notifier_register(&bat->notif); ++ if (status) ++ goto err_notif; ++ + status = device_create_file(&bat->psy->dev, &alarm_attr); + if (status) -+ goto err_dereg; ++ goto err_file; + -+ spwr_subsystem.battery[bat->id] = bat; -+ mutex_unlock(&spwr_subsystem.lock); + return 0; + -+err_dereg: ++err_file: ++ surface_sam_ssh_notifier_unregister(&bat->notif); ++err_notif: + 
power_supply_unregister(bat->psy); -+err_unref: -+ spwr_subsys_unref_unlocked(); -+err: -+ mutex_unlock(&spwr_subsystem.lock); ++err_psy: ++ mutex_destroy(&bat->lock); + return status; +} + -+static int spwr_battery_unregister(struct spwr_battery_device *bat) ++static void spwr_battery_unregister(struct spwr_battery_device *bat) +{ -+ int status; -+ -+ if (bat->id < 0 || bat->id >= __SPWR_NUM_BAT) -+ return -EINVAL; -+ -+ mutex_lock(&spwr_subsystem.lock); -+ if (spwr_subsystem.battery[bat->id] != bat) { -+ mutex_unlock(&spwr_subsystem.lock); -+ return -EINVAL; -+ } -+ -+ spwr_subsystem.battery[bat->id] = NULL; -+ -+ status = spwr_subsys_unref_unlocked(); -+ mutex_unlock(&spwr_subsystem.lock); -+ ++ surface_sam_ssh_notifier_unregister(&bat->notif); + cancel_delayed_work_sync(&bat->update_work); + device_remove_file(&bat->psy->dev, &alarm_attr); + power_supply_unregister(bat->psy); -+ + mutex_destroy(&bat->lock); -+ return status; +} + + @@ -4740,7 +4824,9 @@ index 000000000000..eb925bdda883 + struct spwr_battery_device *bat; + + bat = platform_get_drvdata(pdev); -+ return spwr_battery_unregister(bat); ++ spwr_battery_unregister(bat); ++ ++ return 0; +} + +static struct platform_driver surface_sam_sid_battery = { @@ -4826,16 +4912,16 @@ index 000000000000..eb925bdda883 + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Battery/AC Driver for 7th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:surface_sam_sid_ac"); +MODULE_ALIAS("platform:surface_sam_sid_battery"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c new file mode 100644 -index 000000000000..9cf912a44171 +index 0000000000000..5feb882cf74e8 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_sid_vhf.c -@@ -0,0 +1,428 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,420 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Microsofs Surface 
HID (VHF) driver for HID input events via SAM. + * Used for keyboard input events on the 7th generation Surface Laptops. @@ -4851,23 +4937,15 @@ index 000000000000..9cf912a44171 + +#define SID_VHF_INPUT_NAME "Microsoft Surface HID" + -+/* -+ * Request ID for VHF events. This value is based on the output of the Surface -+ * EC and should not be changed. -+ */ -+#define SAM_EVENT_SID_VHF_RQID 0x0015 +#define SAM_EVENT_SID_VHF_TC 0x15 + +#define VHF_HID_STARTED 0 + -+struct sid_vhf_evtctx { -+ struct device *dev; ++struct sid_vhf { ++ struct platform_device *dev; + struct hid_device *hid; -+ unsigned long flags; -+}; -+ -+struct sid_vhf_drvdata { -+ struct sid_vhf_evtctx event_ctx; ++ struct ssam_event_notifier notif; ++ unsigned long state; +}; + + @@ -4884,22 +4962,22 @@ index 000000000000..9cf912a44171 + +static int sid_vhf_hid_open(struct hid_device *hid) +{ -+ struct sid_vhf_drvdata *drvdata = platform_get_drvdata(to_platform_device(hid->dev.parent)); ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); + + hid_dbg(hid, "%s\n", __func__); + -+ set_bit(VHF_HID_STARTED, &drvdata->event_ctx.flags); ++ set_bit(VHF_HID_STARTED, &vhf->state); + return 0; +} + +static void sid_vhf_hid_close(struct hid_device *hid) +{ + -+ struct sid_vhf_drvdata *drvdata = platform_get_drvdata(to_platform_device(hid->dev.parent)); ++ struct sid_vhf *vhf = dev_get_drvdata(hid->dev.parent); + + hid_dbg(hid, "%s\n", __func__); + -+ clear_bit(VHF_HID_STARTED, &drvdata->event_ctx.flags); ++ clear_bit(VHF_HID_STARTED, &vhf->state); +} + +struct surface_sam_sid_vhf_meta_rqst { @@ -4953,10 +5031,10 @@ index 000000000000..9cf912a44171 + }; + + struct surface_sam_ssh_rqst rqst = { -+ .tc = 0x15, ++ .tc = 0x15, + .cid = 0x04, + .iid = iid, -+ .pri = 0x02, ++ .chn = 0x02, + .snc = 0x01, + .cdl = sizeof(struct surface_sam_sid_vhf_meta_rqst), + .pld = (u8 *)&resp.rqst, @@ -4992,10 +5070,10 @@ index 000000000000..9cf912a44171 + }; + + struct surface_sam_ssh_rqst rqst = { -+ .tc = 0x15, ++ .tc = 
0x15, + .cid = 0x04, + .iid = iid, -+ .pri = 0x02, ++ .chn = 0x02, + .snc = 0x01, + .cdl = sizeof(struct surface_sam_sid_vhf_meta_rqst), + .pld = (u8 *)&resp.rqst, @@ -5104,7 +5182,7 @@ index 000000000000..9cf912a44171 + } + + rqst.tc = SAM_EVENT_SID_VHF_TC; -+ rqst.pri = SURFACE_SAM_PRIORITY_HIGH; ++ rqst.chn = 0x02; + rqst.iid = 0x00; // windows tends to distinguish iids, but EC will take it + rqst.cid = cid; + rqst.snc = reqtype == HID_REQ_GET_REPORT ? 0x01 : 0x00; @@ -5160,24 +5238,28 @@ index 000000000000..9cf912a44171 + return hid; +} + -+static int sid_vhf_event_handler(struct surface_sam_ssh_event *event, void *data) ++static u32 sid_vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event) +{ -+ struct sid_vhf_evtctx *ctx = (struct sid_vhf_evtctx *)data; ++ struct sid_vhf *vhf = container_of(nb, struct sid_vhf, notif.base); ++ int status; + -+ // skip if HID hasn't started yet -+ if (!test_bit(VHF_HID_STARTED, &ctx->flags)) ++ if (event->target_category != SSAM_SSH_TC_HID) + return 0; + -+ if (event->tc == SAM_EVENT_SID_VHF_TC && (event->cid == 0x00 || event->cid == 0x03 || event->cid == 0x04)) -+ return hid_input_report(ctx->hid, HID_INPUT_REPORT, event->pld, event->len, 1); ++ if (event->command_id != 0x00 && event->command_id != 0x03 && event->command_id != 0x04) ++ return 0; + -+ dev_warn(ctx->dev, "unsupported event (tc = %d, cid = %d)\n", event->tc, event->cid); -+ return 0; ++ // skip if HID hasn't started yet ++ if (!test_bit(VHF_HID_STARTED, &vhf->state)) ++ return SSAM_NOTIF_HANDLED; ++ ++ status = hid_input_report(vhf->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 0); ++ return ssam_notifier_from_errno(status) | SSAM_NOTIF_HANDLED; +} + +static int surface_sam_sid_vhf_probe(struct platform_device *pdev) +{ -+ struct sid_vhf_drvdata *drvdata; ++ struct sid_vhf *vhf; + struct vhf_device_metadata meta = {}; + struct hid_device *hid; + int status; @@ -5187,8 +5269,8 @@ index 000000000000..9cf912a44171 + if 
(status) + return status == -ENXIO ? -EPROBE_DEFER : status; + -+ drvdata = kzalloc(sizeof(struct sid_vhf_drvdata), GFP_KERNEL); -+ if (!drvdata) ++ vhf = kzalloc(sizeof(struct sid_vhf), GFP_KERNEL); ++ if (!vhf) + return -ENOMEM; + + status = vhf_get_metadata(0x00, &meta); @@ -5201,21 +5283,21 @@ index 000000000000..9cf912a44171 + goto err_create_hid; + } + -+ drvdata->event_ctx.dev = &pdev->dev; -+ drvdata->event_ctx.hid = hid; ++ vhf->dev = pdev; ++ vhf->hid = hid; + -+ platform_set_drvdata(pdev, drvdata); ++ vhf->notif.base.priority = 1; ++ vhf->notif.base.fn = sid_vhf_event_handler; ++ vhf->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ vhf->notif.event.id.target_category = SSAM_SSH_TC_HID; ++ vhf->notif.event.id.instance = 0; ++ vhf->notif.event.flags = 0; + -+ status = surface_sam_ssh_set_event_handler( -+ SAM_EVENT_SID_VHF_RQID, -+ sid_vhf_event_handler, -+ &drvdata->event_ctx); ++ platform_set_drvdata(pdev, vhf); ++ ++ status = surface_sam_ssh_notifier_register(&vhf->notif); + if (status) -+ goto err_event_handler; -+ -+ status = surface_sam_ssh_enable_event_source(SAM_EVENT_SID_VHF_TC, 0x01, SAM_EVENT_SID_VHF_RQID); -+ if (status) -+ goto err_event_source; ++ goto err_notif; + + status = hid_add_device(hid); + if (status) @@ -5224,26 +5306,22 @@ index 000000000000..9cf912a44171 + return 0; + +err_add_hid: -+ surface_sam_ssh_disable_event_source(SAM_EVENT_SID_VHF_TC, 0x01, SAM_EVENT_SID_VHF_RQID); -+err_event_source: -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_SID_VHF_RQID); -+err_event_handler: ++ surface_sam_ssh_notifier_unregister(&vhf->notif); ++err_notif: + hid_destroy_device(hid); + platform_set_drvdata(pdev, NULL); +err_create_hid: -+ kfree(drvdata); ++ kfree(vhf); + return status; +} + +static int surface_sam_sid_vhf_remove(struct platform_device *pdev) +{ -+ struct sid_vhf_drvdata *drvdata = platform_get_drvdata(pdev); ++ struct sid_vhf *vhf = platform_get_drvdata(pdev); + -+ surface_sam_ssh_disable_event_source(SAM_EVENT_SID_VHF_TC, 0x01, 
SAM_EVENT_SID_VHF_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_SID_VHF_RQID); -+ -+ hid_destroy_device(drvdata->event_ctx.hid); -+ kfree(drvdata); ++ surface_sam_ssh_notifier_unregister(&vhf->notif); ++ hid_destroy_device(vhf->hid); ++ kfree(vhf); + + platform_set_drvdata(pdev, NULL); + return 0; @@ -5261,15 +5339,15 @@ index 000000000000..9cf912a44171 + +MODULE_AUTHOR("Blaž Hrastnik "); +MODULE_DESCRIPTION("Driver for HID devices connected via Surface SAM"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:surface_sam_sid_vhf"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.c b/drivers/platform/x86/surface_sam/surface_sam_ssh.c new file mode 100644 -index 000000000000..988be7c2d286 +index 0000000000000..87f4f5d95cc4a --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.c -@@ -0,0 +1,1744 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,5115 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Surface Serial Hub (SSH) driver for communication with the Surface/System + * Aggregator Module. @@ -5277,6 +5355,7 @@ index 000000000000..988be7c2d286 + +#include +#include ++#include +#include +#include +#include @@ -5285,112 +5364,4089 @@ index 000000000000..988be7c2d286 +#include +#include +#include ++#include ++#include ++#include ++#include +#include +#include +#include +#include +#include ++#include +#include + +#include "surface_sam_ssh.h" + ++#define CREATE_TRACE_POINTS ++#include "surface_sam_ssh_trace.h" ++ ++ ++/* -- TODO. 
----------------------------------------------------------------- */ + +#define SSH_RQST_TAG_FULL "surface_sam_ssh_rqst: " +#define SSH_RQST_TAG "rqst: " -+#define SSH_EVENT_TAG "event: " -+#define SSH_RECV_TAG "recv: " + +#define SSH_SUPPORTED_FLOW_CONTROL_MASK (~((u8) ACPI_UART_FLOW_CONTROL_HW)) + -+#define SSH_BYTELEN_SYNC 2 -+#define SSH_BYTELEN_TERM 2 -+#define SSH_BYTELEN_CRC 2 -+#define SSH_BYTELEN_CTRL 4 // command-header, ACK, or RETRY -+#define SSH_BYTELEN_CMDFRAME 8 // without payload + -+#define SSH_MAX_WRITE ( \ -+ SSH_BYTELEN_SYNC \ -+ + SSH_BYTELEN_CTRL \ -+ + SSH_BYTELEN_CRC \ -+ + SSH_BYTELEN_CMDFRAME \ -+ + SURFACE_SAM_SSH_MAX_RQST_PAYLOAD \ -+ + SSH_BYTELEN_CRC \ -+) ++/* -- Error injection helpers. ---------------------------------------------- */ + -+#define SSH_MSG_LEN_CTRL ( \ -+ SSH_BYTELEN_SYNC \ -+ + SSH_BYTELEN_CTRL \ -+ + SSH_BYTELEN_CRC \ -+ + SSH_BYTELEN_TERM \ -+) ++#if 0 // not supported on 4.19 ++#define noinline_if_inject noinline ++#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++#define noinline_if_inject inline ++#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ + -+#define SSH_MSG_LEN_CMD_BASE ( \ -+ SSH_BYTELEN_SYNC \ -+ + SSH_BYTELEN_CTRL \ -+ + SSH_BYTELEN_CRC \ -+ + SSH_BYTELEN_CRC \ -+) // without payload and command-frame + -+#define SSH_WRITE_TIMEOUT msecs_to_jiffies(1000) -+#define SSH_READ_TIMEOUT msecs_to_jiffies(1000) -+#define SSH_NUM_RETRY 3 ++/* -- Public interface. 
----------------------------------------------------- */ + -+#define SSH_WRITE_BUF_LEN SSH_MAX_WRITE -+#define SSH_READ_BUF_LEN 512 // must be power of 2 -+#define SSH_EVAL_BUF_LEN SSH_MAX_WRITE // also works for reading ++enum ssam_request_flags { ++ SSAM_REQUEST_HAS_RESPONSE = BIT(0), ++ SSAM_REQUEST_UNSEQUENCED = BIT(1), ++}; + -+#define SSH_FRAME_TYPE_CMD_NOACK 0x00 // request/event that does not to be ACKed -+#define SSH_FRAME_TYPE_CMD 0x80 // request/event -+#define SSH_FRAME_TYPE_ACK 0x40 // ACK for request/event -+#define SSH_FRAME_TYPE_RETRY 0x04 // error or retry indicator ++struct ssam_request { ++ u8 target_category; ++ u8 command_id; ++ u8 instance_id; ++ u8 channel; ++ u16 flags; ++ u16 length; ++ u8 *payload; ++}; + -+#define SSH_FRAME_OFFS_CTRL SSH_BYTELEN_SYNC -+#define SSH_FRAME_OFFS_CTRL_CRC (SSH_FRAME_OFFS_CTRL + SSH_BYTELEN_CTRL) -+#define SSH_FRAME_OFFS_TERM (SSH_FRAME_OFFS_CTRL_CRC + SSH_BYTELEN_CRC) -+#define SSH_FRAME_OFFS_CMD SSH_FRAME_OFFS_TERM // either TERM or CMD -+#define SSH_FRAME_OFFS_CMD_PLD (SSH_FRAME_OFFS_CMD + SSH_BYTELEN_CMDFRAME) + ++/* -- Common/utility functions. --------------------------------------------- */ ++ ++static inline u16 ssh_crc(const u8 *buf, size_t len) ++{ ++ return crc_ccitt_false(0xffff, buf, len); ++} ++ ++static inline u16 __ssh_rqid_next(u16 rqid) ++{ ++ return rqid > 0 ? rqid + 1u : rqid + SURFACE_SAM_SSH_NUM_EVENTS + 1u; ++} ++ ++static inline u16 ssh_event_to_rqid(u16 event) ++{ ++ return event + 1u; ++} ++ ++static inline u16 ssh_rqid_to_event(u16 rqid) ++{ ++ return rqid - 1u; ++} ++ ++static inline bool ssh_rqid_is_event(u16 rqid) ++{ ++ return ssh_rqid_to_event(rqid) < SURFACE_SAM_SSH_NUM_EVENTS; ++} ++ ++static inline int ssh_tc_to_rqid(u8 tc) ++{ ++#if 0 // TODO: check if it works without this ++ /* ++ * TC=0x08 represents the input subsystem on Surface Laptop 1 and 2. ++ * This is mapped on Windows to RQID=0x0001. 
As input events seem to be ++ * somewhat special with regards to enabling/disabling (they seem to be ++ * enabled by default with a fixed RQID), let's do the same here. ++ */ ++ if (tc == 0x08) ++ return 0x0001; ++ ++ /* Default path: Set RQID = TC. */ ++#endif ++ return tc; ++} ++ ++static inline int ssh_tc_to_event(u8 tc) ++{ ++ return ssh_rqid_to_event(ssh_tc_to_rqid(tc)); ++} ++ ++static inline u8 ssh_channel_to_index(u8 channel) ++{ ++ return channel - 1u; ++} ++ ++static inline bool ssh_channel_is_valid(u8 channel) ++{ ++ return ssh_channel_to_index(channel) < SURFACE_SAM_SSH_NUM_CHANNELS; ++} ++ ++ ++/* -- Safe counters. -------------------------------------------------------- */ ++ ++struct ssh_seq_counter { ++ u8 value; ++}; ++ ++struct ssh_rqid_counter { ++ u16 value; ++}; ++ ++static inline void ssh_seq_reset(struct ssh_seq_counter *c) ++{ ++ WRITE_ONCE(c->value, 0); ++} ++ ++static inline u8 ssh_seq_next(struct ssh_seq_counter *c) ++{ ++ u8 old = READ_ONCE(c->value); ++ u8 new = old + 1; ++ u8 ret; ++ ++ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) { ++ old = ret; ++ new = old + 1; ++ } ++ ++ return old; ++} ++ ++static inline void ssh_rqid_reset(struct ssh_rqid_counter *c) ++{ ++ WRITE_ONCE(c->value, 0); ++} ++ ++static inline u16 ssh_rqid_next(struct ssh_rqid_counter *c) ++{ ++ u16 old = READ_ONCE(c->value); ++ u16 new = __ssh_rqid_next(old); ++ u16 ret; ++ ++ while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) { ++ old = ret; ++ new = __ssh_rqid_next(old); ++ } ++ ++ return old; ++} ++ ++ ++/* -- Builder functions for SAM-over-SSH messages. 
-------------------------- */ ++ ++struct msgbuf { ++ u8 *buffer; ++ u8 *end; ++ u8 *ptr; ++}; ++ ++static inline void msgb_init(struct msgbuf *msgb, u8 *buffer, size_t cap) ++{ ++ msgb->buffer = buffer; ++ msgb->end = buffer + cap; ++ msgb->ptr = buffer; ++} ++ ++static inline int msgb_alloc(struct msgbuf *msgb, size_t cap, gfp_t flags) ++{ ++ u8 *buf; ++ ++ buf = kzalloc(cap, flags); ++ if (!buf) ++ return -ENOMEM; ++ ++ msgb_init(msgb, buf, cap); ++ return 0; ++} ++ ++static inline void msgb_free(struct msgbuf *msgb) ++{ ++ kfree(msgb->buffer); ++ msgb->buffer = NULL; ++ msgb->end = NULL; ++ msgb->ptr = NULL; ++} ++ ++static inline void msgb_reset(struct msgbuf *msgb) ++{ ++ msgb->ptr = msgb->buffer; ++} ++ ++static inline size_t msgb_bytes_used(const struct msgbuf *msgb) ++{ ++ return msgb->ptr - msgb->buffer; ++} ++ ++static inline void msgb_push_u16(struct msgbuf *msgb, u16 value) ++{ ++ WARN_ON(msgb->ptr + sizeof(u16) > msgb->end); ++ if (msgb->ptr + sizeof(u16) > msgb->end) ++ return; ++ ++ put_unaligned_le16(value, msgb->ptr); ++ msgb->ptr += sizeof(u16); ++} ++ ++static inline void msgb_push_syn(struct msgbuf *msgb) ++{ ++ msgb_push_u16(msgb, SSH_MSG_SYN); ++} ++ ++static inline void msgb_push_buf(struct msgbuf *msgb, const u8 *buf, size_t len) ++{ ++ msgb->ptr = memcpy(msgb->ptr, buf, len) + len; ++} ++ ++static inline void msgb_push_crc(struct msgbuf *msgb, const u8 *buf, size_t len) ++{ ++ msgb_push_u16(msgb, ssh_crc(buf, len)); ++} ++ ++static inline void msgb_push_frame(struct msgbuf *msgb, u8 ty, u16 len, u8 seq) ++{ ++ struct ssh_frame *frame = (struct ssh_frame *)msgb->ptr; ++ const u8 *const begin = msgb->ptr; ++ ++ WARN_ON(msgb->ptr + sizeof(*frame) > msgb->end); ++ if (msgb->ptr + sizeof(*frame) > msgb->end) ++ return; ++ ++ frame->type = ty; ++ put_unaligned_le16(len, &frame->len); ++ frame->seq = seq; ++ ++ msgb->ptr += sizeof(*frame); ++ msgb_push_crc(msgb, begin, msgb->ptr - begin); ++} ++ ++static inline void msgb_push_ack(struct msgbuf 
*msgb, u8 seq) ++{ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // ACK-type frame + CRC ++ msgb_push_frame(msgb, SSH_FRAME_TYPE_ACK, 0x00, seq); ++ ++ // payload CRC (ACK-type frames do not have a payload) ++ msgb_push_crc(msgb, msgb->ptr, 0); ++} ++ ++static inline void msgb_push_nak(struct msgbuf *msgb) ++{ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // NAK-type frame + CRC ++ msgb_push_frame(msgb, SSH_FRAME_TYPE_NAK, 0x00, 0x00); ++ ++ // payload CRC (ACK-type frames do not have a payload) ++ msgb_push_crc(msgb, msgb->ptr, 0); ++} ++ ++static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, ++ const struct surface_sam_ssh_rqst *rqst, ++ u16 rqid) ++{ ++ struct ssh_command *cmd; ++ const u8 *cmd_begin; ++ const u8 type = SSH_FRAME_TYPE_DATA_SEQ; ++ ++ // SYN ++ msgb_push_syn(msgb); ++ ++ // command frame + crc ++ msgb_push_frame(msgb, type, sizeof(*cmd) + rqst->cdl, seq); ++ ++ // frame payload: command struct + payload ++ WARN_ON(msgb->ptr + sizeof(*cmd) > msgb->end); ++ if (msgb->ptr + sizeof(*cmd) > msgb->end) ++ return; ++ ++ cmd_begin = msgb->ptr; ++ cmd = (struct ssh_command *)msgb->ptr; ++ ++ cmd->type = SSH_PLD_TYPE_CMD; ++ cmd->tc = rqst->tc; ++ cmd->chn_out = rqst->chn; ++ cmd->chn_in = 0x00; ++ cmd->iid = rqst->iid; ++ put_unaligned_le16(rqid, &cmd->rqid); ++ cmd->cid = rqst->cid; ++ ++ msgb->ptr += sizeof(*cmd); ++ ++ // command payload ++ msgb_push_buf(msgb, rqst->pld, rqst->cdl); ++ ++ // crc for command struct + payload ++ msgb_push_crc(msgb, cmd_begin, msgb->ptr - cmd_begin); ++} ++ ++ ++/* -- Parser functions and utilities for SAM-over-SSH messages. 
------------- */ ++ ++struct sshp_buf { ++ u8 *ptr; ++ size_t len; ++ size_t cap; ++}; ++ ++ ++static inline bool sshp_validate_crc(const struct sshp_span *src, const u8 *crc) ++{ ++ u16 actual = ssh_crc(src->ptr, src->len); ++ u16 expected = get_unaligned_le16(crc); ++ ++ return actual == expected; ++} ++ ++static bool sshp_find_syn(const struct sshp_span *src, struct sshp_span *rem) ++{ ++ size_t i; ++ ++ for (i = 0; i < src->len - 1; i++) { ++ if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) { ++ rem->ptr = src->ptr + i; ++ rem->len = src->len - i; ++ return true; ++ } ++ } ++ ++ if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) { ++ rem->ptr = src->ptr + src->len - 1; ++ rem->len = 1; ++ return false; ++ } else { ++ rem->ptr = src->ptr + src->len; ++ rem->len = 0; ++ return false; ++ } ++} ++ ++static bool sshp_starts_with_syn(const struct sshp_span *src) ++{ ++ return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN; ++} ++ ++static int sshp_parse_frame(const struct device *dev, ++ const struct sshp_span *source, ++ struct ssh_frame **frame, ++ struct sshp_span *payload, ++ size_t maxlen) ++{ ++ struct sshp_span sf; ++ struct sshp_span sp; ++ ++ // initialize output ++ *frame = NULL; ++ payload->ptr = NULL; ++ payload->len = 0; ++ ++ if (!sshp_starts_with_syn(source)) { ++ dev_warn(dev, "rx: parser: invalid start of frame\n"); ++ return -ENOMSG; ++ } ++ ++ // check for minumum packet length ++ if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) { ++ dev_dbg(dev, "rx: parser: not enough data for frame\n"); ++ return 0; ++ } ++ ++ // pin down frame ++ sf.ptr = source->ptr + sizeof(u16); ++ sf.len = sizeof(struct ssh_frame); ++ ++ // validate frame CRC ++ if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) { ++ dev_warn(dev, "rx: parser: invalid frame CRC\n"); ++ return -EBADMSG; ++ } ++ ++ // ensure packet does not exceed maximum length ++ if (unlikely(((struct ssh_frame *)sf.ptr)->len > maxlen)) { ++ dev_warn(dev, "rx: 
parser: frame too large: %u bytes\n", ++ ((struct ssh_frame *)sf.ptr)->len); ++ return -EMSGSIZE; ++ } ++ ++ // pin down payload ++ sp.ptr = sf.ptr + sf.len + sizeof(u16); ++ sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len); ++ ++ // check for frame + payload length ++ if (source->len < SSH_MESSAGE_LENGTH(sp.len)) { ++ dev_dbg(dev, "rx: parser: not enough data for payload\n"); ++ return 0; ++ } ++ ++ // validate payload crc ++ if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) { ++ dev_warn(dev, "rx: parser: invalid payload CRC\n"); ++ return -EBADMSG; ++ } ++ ++ *frame = (struct ssh_frame *)sf.ptr; ++ *payload = sp; ++ ++ dev_dbg(dev, "rx: parser: valid frame found (type: 0x%02x, len: %u)\n", ++ (*frame)->type, (*frame)->len); ++ ++ return 0; ++} ++ ++static int sshp_parse_command(const struct device *dev, ++ const struct sshp_span *source, ++ struct ssh_command **command, ++ struct sshp_span *command_data) ++{ ++ // check for minimum length ++ if (unlikely(source->len < sizeof(struct ssh_command))) { ++ *command = NULL; ++ command_data->ptr = NULL; ++ command_data->len = 0; ++ ++ dev_err(dev, "rx: parser: command payload is too short\n"); ++ return -ENOMSG; ++ } ++ ++ *command = (struct ssh_command *)source->ptr; ++ command_data->ptr = source->ptr + sizeof(struct ssh_command); ++ command_data->len = source->len - sizeof(struct ssh_command); ++ ++ dev_dbg(dev, "rx: parser: valid command found (tc: 0x%02x," ++ " cid: 0x%02x)\n", (*command)->tc, (*command)->cid); ++ ++ return 0; ++} ++ ++ ++static inline void sshp_buf_init(struct sshp_buf *buf, u8 *ptr, size_t cap) ++{ ++ buf->ptr = ptr; ++ buf->len = 0; ++ buf->cap = cap; ++} ++ ++static inline int sshp_buf_alloc(struct sshp_buf *buf, size_t cap, gfp_t flags) ++{ ++ u8 *ptr; ++ ++ ptr = kzalloc(cap, flags); ++ if (!ptr) ++ return -ENOMEM; ++ ++ sshp_buf_init(buf, ptr, cap); ++ return 0; ++ ++} ++ ++static inline void sshp_buf_free(struct sshp_buf *buf) ++{ ++ kfree(buf->ptr); ++ buf->ptr = NULL; 
++ buf->len = 0; ++ buf->cap = 0; ++} ++ ++static inline void sshp_buf_reset(struct sshp_buf *buf) ++{ ++ buf->len = 0; ++} ++ ++static inline void sshp_buf_drop(struct sshp_buf *buf, size_t n) ++{ ++ memmove(buf->ptr, buf->ptr + n, buf->len - n); ++ buf->len -= n; ++} ++ ++static inline size_t sshp_buf_read_from_fifo(struct sshp_buf *buf, ++ struct kfifo *fifo) ++{ ++ size_t n; ++ ++ n = kfifo_out(fifo, buf->ptr + buf->len, buf->cap - buf->len); ++ buf->len += n; ++ ++ return n; ++} ++ ++static inline void sshp_buf_span_from(struct sshp_buf *buf, size_t offset, ++ struct sshp_span *span) ++{ ++ span->ptr = buf->ptr + offset; ++ span->len = buf->len - offset; ++} ++ ++ ++/* -- Packet transport layer (ptl). ----------------------------------------- */ +/* -+ * A note on Request IDs (RQIDs): -+ * 0x0000 is not a valid RQID -+ * 0x0001 is valid, but reserved for Surface Laptop keyboard events -+ */ -+#define SAM_NUM_EVENT_TYPES ((1 << SURFACE_SAM_SSH_RQID_EVENT_BITS) - 1) -+ -+/* -+ * Sync: aa 55 -+ * Terminate: ff ff ++ * To simplify reasoning about the code below, we define a few concepts. The ++ * system below is similar to a state-machine for packets, however, there are ++ * too many states to explicitly write them down. To (somewhat) manage the ++ * states and packets we rely on flags, reference counting, and some simple ++ * concepts. State transitions are triggered by actions. 
+ * -+ * Request Message: sync cmd-hdr crc(cmd-hdr) cmd-rqst-frame crc(cmd-rqst-frame) -+ * Ack Message: sync ack crc(ack) terminate -+ * Retry Message: sync retry crc(retry) terminate -+ * Response Message: sync cmd-hdr crc(cmd-hdr) cmd-resp-frame crc(cmd-resp-frame) ++ * >> Actions << + * -+ * Command Header: 80 LEN 00 SEQ -+ * Ack: 40 00 00 SEQ -+ * Retry: 04 00 00 00 -+ * Command Request Frame: 80 RTC 01 00 RIID RQID RCID PLD -+ * Command Response Frame: 80 RTC 00 01 RIID RQID RCID PLD ++ * - submit ++ * - transmission start (process next item in queue) ++ * - transmission finished (guaranteed to never be parallel to transmission ++ * start) ++ * - ACK received ++ * - NAK received (this is equivalent to issuing re-submit for all pending ++ * packets) ++ * - timeout (this is equivalent to re-issuing a submit or canceling) ++ * - cancel (non-pending and pending) ++ * ++ * >> Data Structures, Packet Ownership, General Overview << ++ * ++ * The code below employs two main data structures: The packet queue, containing ++ * all packets scheduled for transmission, and the set of pending packets, ++ * containing all packets awaiting an ACK. ++ * ++ * Shared ownership of a packet is controlled via reference counting. Inside the ++ * transmission system are a total of five packet owners: ++ * ++ * - the packet queue, ++ * - the pending set, ++ * - the transmitter thread, ++ * - the receiver thread (via ACKing), and ++ * - the timeout work item. ++ * ++ * Normal operation is as follows: The initial reference of the packet is ++ * obtained by submitting the packet and queueing it. The receiver thread ++ * takes packets from the queue. By doing this, it does not increment the ++ * refcount but takes over the reference (removing it from the queue). ++ * If the packet is sequenced (i.e. needs to be ACKed by the client), the ++ * transmitter thread sets-up the timeout and adds the packet to the pending set ++ * before starting to transmit it. 
As the timeout is handled by a reaper task, ++ * no additional reference for it is needed. After the transmit is done, the ++ * reference hold by the transmitter thread is dropped. If the packet is ++ * unsequenced (i.e. does not need an ACK), the packet is completed by the ++ * transmitter thread before dropping that reference. ++ * ++ * On receial of an ACK, the receiver thread removes and obtains the refernce to ++ * the packet from the pending set. On succes, the receiver thread will then ++ * complete the packet and drop its reference. ++ * ++ * On error, the completion callback is immediately run by on thread on which ++ * the error was detected. ++ * ++ * To ensure that a packet eventually leaves the system it is marked as "locked" ++ * directly before it is going to be completed or when it is canceled. Marking a ++ * packet as "locked" has the effect that passing and creating new references ++ * of the packet will be blocked. This means that the packet cannot be added ++ * to the queue, the pending set, and the timeout, or be picked up by the ++ * transmitter thread or receiver thread. To remove a packet from the system it ++ * has to be marked as locked and subsequently all references from the data ++ * structures (queue, pending) have to be removed. References held by threads ++ * will eventually be dropped automatically as their execution progresses. ++ * ++ * Note that the packet completion callback is, in case of success and for a ++ * sequenced packet, guaranteed to run on the receiver thread, thus providing a ++ * way to reliably identify responses to the packet. The packet completion ++ * callback is only run once and it does not indicate that the packet has fully ++ * left the system. In case of re-submission (and with somewhat unlikely ++ * timing), it may be possible that the packet is being re-transmitted while the ++ * completion callback runs. 
Completion will occur both on success and internal ++ * error, as well as when the packet is canceled. ++ * ++ * >> Flags << ++ * ++ * Flags are used to indicate the state and progression of a packet. Some flags ++ * have stricter guarantees than other: ++ * ++ * - locked ++ * Indicates if the packet is locked. If the packet is locked, passing and/or ++ * creating additional references to the packet is forbidden. The packet thus ++ * may not be queued, dequeued, or removed or added to the pending set. Note ++ * that the packet state flags may still change (e.g. it may be marked as ++ * ACKed, transmitted, ...). ++ * ++ * - completed ++ * Indicates if the packet completion has been run or is about to be run. This ++ * flag is used to ensure that the packet completion callback is only run ++ * once. ++ * ++ * - queued ++ * Indicates if a packet is present in the submission queue or not. This flag ++ * must only be modified with the queue lock held, and must be coherent ++ * presence of the packet in the queue. ++ * ++ * - pending ++ * Indicates if a packet is present in the set of pending packets or not. ++ * This flag must only be modified with the pending lock held, and must be ++ * coherent presence of the packet in the pending set. ++ * ++ * - transmitting ++ * Indicates if the packet is currently transmitting. In case of ++ * re-transmissions, it is only safe to wait on the "transmitted" completion ++ * after this flag has been set. The completion will be set both in success ++ * and error case. ++ * ++ * - transmitted ++ * Indicates if the packet has been transmitted. This flag is not cleared by ++ * the system, thus it indicates the first transmission only. ++ * ++ * - acked ++ * Indicates if the packet has been acknowledged by the client. There are no ++ * other guarantees given. For example, the packet may still be canceled ++ * and/or the completion may be triggered an error even though this bit is ++ * set. 
Rely on the status provided by completion instead. ++ * ++ * - canceled ++ * Indicates if the packet has been canceled from the outside. There are no ++ * other guarantees given. Specifically, the packet may be completed by ++ * another part of the system before the cancellation attempts to complete it. ++ * ++ * >> General Notes << ++ * ++ * To avoid deadlocks, if both queue and pending locks are required, the pending ++ * lock must be acquired before the queue lock. + */ + -+struct ssh_frame_ctrl { -+ u8 type; -+ u8 len; // without crc -+ u8 pad; -+ u8 seq; -+} __packed; ++/** ++ * Maximum number transmission attempts per sequenced packet in case of ++ * time-outs. Must be smaller than 16. ++ */ ++#define SSH_PTL_MAX_PACKET_TRIES 3 + -+struct ssh_frame_cmd { -+ u8 type; -+ u8 tc; -+ u8 pri_out; -+ u8 pri_in; -+ u8 iid; -+ u8 rqid_lo; // id for request/response matching (low byte) -+ u8 rqid_hi; // id for request/response matching (high byte) -+ u8 cid; -+} __packed; ++/** ++ * Timeout as ktime_t delta for ACKs. If we have not received an ACK in this ++ * time-frame after starting transmission, the packet will be re-submitted. ++ */ ++#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000) + ++/** ++ * Maximum time resolution for timeouts. Currently set to max(2 jiffies, 50ms). ++ * Should be larger than one jiffy to avoid direct re-scheduling of reaper ++ * work_struct. ++ */ ++#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50)) ++ ++/** ++ * Maximum number of sequenced packets concurrently waiting for an ACK. ++ * Packets marked as blocking will not be transmitted while this limit is ++ * reached. 
++ */ ++#define SSH_PTL_MAX_PENDING 1 ++ ++#define SSH_PTL_RX_BUF_LEN 4096 ++ ++#define SSH_PTL_RX_FIFO_LEN 4096 ++ ++ ++enum ssh_ptl_state_flags { ++ SSH_PTL_SF_SHUTDOWN_BIT, ++}; ++ ++struct ssh_ptl_ops { ++ void (*data_received)(struct ssh_ptl *p, const struct sshp_span *data); ++}; ++ ++struct ssh_ptl { ++ struct serdev_device *serdev; ++ unsigned long state; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ } queue; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ atomic_t count; ++ } pending; ++ ++ struct { ++ bool thread_signal; ++ struct task_struct *thread; ++ struct wait_queue_head thread_wq; ++ struct wait_queue_head packet_wq; ++ struct ssh_packet *packet; ++ size_t offset; ++ } tx; ++ ++ struct { ++ struct task_struct *thread; ++ struct wait_queue_head wq; ++ struct kfifo fifo; ++ struct sshp_buf buf; ++ ++ struct { ++ u16 seqs[8]; ++ u16 offset; ++ } blacklist; ++ } rx; ++ ++ struct { ++ ktime_t timeout; ++ ktime_t expires; ++ struct delayed_work reaper; ++ } rtx_timeout; ++ ++ struct ssh_ptl_ops ops; ++}; ++ ++ ++#define __ssam_prcond(func, p, fmt, ...) \ ++ do { \ ++ if ((p)) \ ++ func((p), fmt, ##__VA_ARGS__); \ ++ } while (0); ++ ++#define ptl_dbg(p, fmt, ...) dev_dbg(&(p)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ptl_info(p, fmt, ...) dev_info(&(p)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ptl_warn(p, fmt, ...) dev_warn(&(p)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ptl_err(p, fmt, ...) dev_err(&(p)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ptl_dbg_cond(p, fmt, ...) __ssam_prcond(ptl_dbg, p, fmt, ##__VA_ARGS__) ++ ++#define to_ssh_packet(ptr, member) \ ++ container_of(ptr, struct ssh_packet, member) ++ ++#define to_ssh_ptl(ptr, member) \ ++ container_of(ptr, struct ssh_ptl, member) ++ ++ ++#if 0 // not supported on 4.19 ++ ++/** ++ * ssh_ptl_should_drop_ack_packet - error injection hook to drop ACK packets ++ * ++ * Useful to test detection and handling of automated re-transmits by the EC. 
++ * Specifically of packets that the EC consideres not-ACKed but the driver ++ * already consideres ACKed (due to dropped ACK). In this case, the EC ++ * re-transmits the packet-to-be-ACKed and the driver should detect it as ++ * duplicate/already handled. Note that the driver should still send an ACK ++ * for the re-transmitted packet. ++ */ ++static noinline bool ssh_ptl_should_drop_ack_packet(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE); ++ ++/** ++ * ssh_ptl_should_drop_nak_packet - error injection hook to drop NAK packets ++ * ++ * Useful to test/force automated (timeout-based) re-transmit by the EC. ++ * Specifically, packets that have not reached the driver completely/with valid ++ * checksums. Only useful in combination with receival of (injected) bad data. ++ */ ++static noinline bool ssh_ptl_should_drop_nak_packet(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE); ++ ++/** ++ * ssh_ptl_should_drop_dsq_packet - error injection hook to drop sequenced data ++ * packet ++ * ++ * Useful to test re-transmit timeout of the driver. If the data packet has not ++ * been ACKed after a certain time, the driver should re-transmit the packet up ++ * to limited number of times defined in SSH_PTL_MAX_PACKET_TRIES. ++ */ ++static noinline bool ssh_ptl_should_drop_dsq_packet(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE); ++ ++/** ++ * ssh_ptl_should_fail_write - error injection hook to make serdev_device_write ++ * fail ++ * ++ * Hook to simulate errors in serdev_device_write when transmitting packets. 
++ */ ++static noinline int ssh_ptl_should_fail_write(void) ++{ ++ return 0; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO); ++ ++/** ++ * ssh_ptl_should_corrupt_tx_data - error injection hook to simualte invalid ++ * data being sent to the EC ++ * ++ * Hook to simulate corrupt/invalid data being sent from host (driver) to EC. ++ * Causes the package data to be actively corrupted by overwriting it with ++ * pre-defined values, such that it becomes invalid, causing the EC to respond ++ * with a NAK packet. Useful to test handling of NAK packets received by the ++ * driver. ++ */ ++static noinline bool ssh_ptl_should_corrupt_tx_data(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE); ++ ++/** ++ * ssh_ptl_should_corrupt_rx_syn - error injection hook to simulate invalid ++ * data being sent by the EC ++ * ++ * Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and ++ * test handling thereof in the driver. ++ */ ++static noinline bool ssh_ptl_should_corrupt_rx_syn(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE); ++ ++/** ++ * ssh_ptl_should_corrupt_rx_data - error injection hook to simulate invalid ++ * data being sent by the EC ++ * ++ * Hook to simulate invalid data/checksum of the message frame and test handling ++ * thereof in the driver. 
++ */ ++static noinline bool ssh_ptl_should_corrupt_rx_data(void) ++{ ++ return false; ++} ++ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE); ++ ++ ++static inline bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_ack_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_ack_packet(packet); ++ ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static inline bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_nak_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_nak_packet(packet); ++ ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static inline bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet) ++{ ++ if (likely(!ssh_ptl_should_drop_dsq_packet())) ++ return false; ++ ++ trace_ssam_ei_tx_drop_dsq_packet(packet); ++ ptl_info(packet->ptl, ++ "packet error injection: dropping sequenced data packet %p\n", ++ packet); ++ ++ return true; ++} ++ ++static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) ++{ ++ // ignore packets that don't carry any data (i.e. 
flush) ++ if (!packet->data || !packet->data_length) ++ return false; ++ ++ switch (packet->data[SSH_MSGOFFSET_FRAME(type)]) { ++ case SSH_FRAME_TYPE_ACK: ++ return __ssh_ptl_should_drop_ack_packet(packet); ++ ++ case SSH_FRAME_TYPE_NAK: ++ return __ssh_ptl_should_drop_nak_packet(packet); ++ ++ case SSH_FRAME_TYPE_DATA_SEQ: ++ return __ssh_ptl_should_drop_dsq_packet(packet); ++ ++ default: ++ return false; ++ } ++} ++ ++static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl, ++ struct ssh_packet *packet, ++ const unsigned char *buf, ++ size_t count) ++{ ++ int status; ++ ++ status = ssh_ptl_should_fail_write(); ++ if (unlikely(status)) { ++ trace_ssam_ei_tx_fail_write(packet, status); ++ ptl_info(packet->ptl, ++ "packet error injection: simulating transmit error %d, packet %p\n", ++ status, packet); ++ ++ return status; ++ } ++ ++ return serdev_device_write_buf(ptl->serdev, buf, count); ++} ++ ++static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) ++{ ++ // ignore packets that don't carry any data (i.e. flush) ++ if (!packet->data || !packet->data_length) ++ return; ++ ++ // only allow sequenced data packets to be modified ++ if (packet->data[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_tx_data())) ++ return; ++ ++ trace_ssam_ei_tx_corrupt_data(packet); ++ ptl_info(packet->ptl, ++ "packet error injection: simulating invalid transmit data on packet %p\n", ++ packet); ++ ++ /* ++ * NB: The value 0xb3 has been chosen more or less randomly so that it ++ * doesn't have any (major) overlap with the SYN bytes (aa 55) and is ++ * non-trivial (i.e. non-zero, non-0xff). 
++ */ ++ memset(packet->data, 0xb3, packet->data_length); ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, ++ struct sshp_span *data) ++{ ++ struct sshp_span frame; ++ ++ // check if there actually is something to corrupt ++ if (!sshp_find_syn(data, &frame)) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_rx_syn())) ++ return; ++ ++ trace_ssam_ei_rx_corrupt_syn("data_length", data->len); ++ ++ data->ptr[1] = 0xb3; // set second byte of SYN to "random" value ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, ++ struct sshp_span *frame) ++{ ++ size_t payload_len, message_len; ++ struct ssh_frame *sshf; ++ ++ // ignore incomplete messages, will get handled once it's complete ++ if (frame->len < SSH_MESSAGE_LENGTH(0)) ++ return; ++ ++ // ignore incomplete messages, part 2 ++ payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]); ++ message_len = SSH_MESSAGE_LENGTH(payload_len); ++ if (frame->len < message_len) ++ return; ++ ++ if (likely(!ssh_ptl_should_corrupt_rx_data())) ++ return; ++ ++ sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)]; ++ trace_ssam_ei_rx_corrupt_data(sshf); ++ ++ /* ++ * Flip bits in first byte of payload checksum. This is basically ++ * equivalent to a payload/frame data error without us having to worry ++ * about (the, arguably pretty small, probability of) accidental ++ * checksum collisions. 
++ */ ++ frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2]; ++} ++ ++#else /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++ ++static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet) ++{ ++ return false; ++} ++ ++static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl, ++ struct ssh_packet *packet, ++ const unsigned char *buf, ++ size_t count) ++{ ++ return serdev_device_write_buf(ptl->serdev, buf, count); ++} ++ ++static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet) ++{ ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl, ++ struct sshp_span *data) ++{ ++} ++ ++static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl, ++ struct sshp_span *frame) ++{ ++} ++ ++#endif /* CONFIG_SURFACE_SAM_SSH_ERROR_INJECTION */ ++ ++ ++static void __ssh_ptl_packet_release(struct kref *kref) ++{ ++ struct ssh_packet *p = to_ssh_packet(kref, refcnt); ++ ++ trace_ssam_packet_release(p); ++ ++ ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p); ++ p->ops->release(p); ++} ++ ++static inline void ssh_packet_get(struct ssh_packet *packet) ++{ ++ kref_get(&packet->refcnt); ++} ++ ++static inline void ssh_packet_put(struct ssh_packet *packet) ++{ ++ kref_put(&packet->refcnt, __ssh_ptl_packet_release); ++} ++ ++ ++static inline u8 ssh_packet_get_seq(struct ssh_packet *packet) ++{ ++ return packet->data[SSH_MSGOFFSET_FRAME(seq)]; ++} ++ ++ ++struct ssh_packet_args { ++ u8 type; ++ u8 priority; ++ const struct ssh_packet_ops *ops; ++}; ++ ++static void ssh_packet_init(struct ssh_packet *packet, ++ const struct ssh_packet_args *args) ++{ ++ kref_init(&packet->refcnt); ++ ++ packet->ptl = NULL; ++ INIT_LIST_HEAD(&packet->queue_node); ++ INIT_LIST_HEAD(&packet->pending_node); ++ ++ packet->type = args->type; ++ packet->priority = args->priority; ++ packet->state = 0; ++ packet->timestamp = KTIME_MAX; ++ ++ packet->data_length = 0; ++ packet->data = NULL; ++ ++ packet->ops = args->ops; ++} ++ ++ ++static struct 
ssh_packet *ptl_alloc_ctrl_packet( ++ struct ssh_ptl *ptl, const struct ssh_packet_args *args, ++ gfp_t flags) ++{ ++ struct ssh_packet *packet; ++ ++ // TODO: chache packets ++ ++ packet = kzalloc(sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL, flags); ++ if (!packet) ++ return NULL; ++ ++ ssh_packet_init(packet, args); ++ packet->data_length = SSH_MSG_LEN_CTRL; ++ packet->data = ((u8 *) packet) + sizeof(struct ssh_packet); ++ ++ return packet; ++} ++ ++static void ptl_free_ctrl_packet(struct ssh_packet *p) ++{ ++ // TODO: chache packets ++ ++ kfree(p); ++} ++ ++static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = { ++ .complete = NULL, ++ .release = ptl_free_ctrl_packet, ++}; ++ ++ ++static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now, ++ ktime_t expires) ++{ ++ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now)); ++ ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION); ++ ktime_t old; ++ ++ // re-adjust / schedule reaper if it is above resolution delta ++ old = READ_ONCE(ptl->rtx_timeout.expires); ++ while (ktime_before(aexp, old)) ++ old = cmpxchg64(&ptl->rtx_timeout.expires, old, expires); ++ ++ // if we updated the reaper expiration, modify work timeout ++ if (old == expires) ++ mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta); ++} ++ ++static void ssh_ptl_timeout_start(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ ktime_t timestamp = ktime_get_coarse_boottime(); ++ ktime_t timeout = ptl->rtx_timeout.timeout; ++ ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) ++ return; ++ ++ WRITE_ONCE(packet->timestamp, timestamp); ++ smp_mb__after_atomic(); ++ ++ ssh_ptl_timeout_reaper_mod(packet->ptl, timestamp, timestamp + timeout); ++} ++ ++ ++static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p) ++{ ++ struct list_head *head; ++ u8 priority = READ_ONCE(p->priority); ++ ++ /* ++ * We generally assume that there are less control (ACK/NAK) 
packets and ++ * re-submitted data packets as there are normal data packets (at least ++ * in situations in which many packets are queued; if there aren't many ++ * packets queued the decision on how to iterate should be basically ++ * irrellevant; the number of control/data packets is more or less ++ * limited via the maximum number of pending packets). Thus, when ++ * inserting a control or re-submitted data packet, (determined by their ++ * priority), we search from front to back. Normal data packets are, ++ * usually queued directly at the tail of the queue, so for those search ++ * from back to front. ++ */ ++ ++ if (priority > SSH_PACKET_PRIORITY_DATA) { ++ list_for_each(head, &p->ptl->queue.head) { ++ p = list_entry(head, struct ssh_packet, queue_node); ++ ++ if (READ_ONCE(p->priority) < priority) ++ break; ++ } ++ } else { ++ list_for_each_prev(head, &p->ptl->queue.head) { ++ p = list_entry(head, struct ssh_packet, queue_node); ++ ++ if (READ_ONCE(p->priority) >= priority) { ++ head = head->next; ++ break; ++ } ++ } ++ } ++ ++ ++ return head; ++} ++ ++static int ssh_ptl_queue_push(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ struct list_head *head; ++ ++ spin_lock(&ptl->queue.lock); ++ ++ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) { ++ spin_unlock(&ptl->queue.lock); ++ return -ESHUTDOWN; ++ } ++ ++ // avoid further transitions when cancelling/completing ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) { ++ spin_unlock(&ptl->queue.lock); ++ return -EINVAL; ++ } ++ ++ // if this packet has already been queued, do not add it ++ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) { ++ spin_unlock(&ptl->queue.lock); ++ return -EALREADY; ++ } ++ ++ head = __ssh_ptl_queue_find_entrypoint(packet); ++ ++ ssh_packet_get(packet); ++ list_add_tail(&packet->queue_node, &ptl->queue.head); ++ ++ spin_unlock(&ptl->queue.lock); ++ return 0; ++} ++ ++static void ssh_ptl_queue_remove(struct ssh_packet *packet) ++{ ++ 
struct ssh_ptl *ptl = packet->ptl; ++ bool remove; ++ ++ spin_lock(&ptl->queue.lock); ++ ++ remove = test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state); ++ if (remove) ++ list_del(&packet->queue_node); ++ ++ spin_unlock(&ptl->queue.lock); ++ ++ if (remove) ++ ssh_packet_put(packet); ++} ++ ++ ++static void ssh_ptl_pending_push(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ ++ spin_lock(&ptl->pending.lock); ++ ++ // if we are cancelling/completing this packet, do not add it ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)) { ++ spin_unlock(&ptl->pending.lock); ++ return; ++ } ++ ++ // in case it is already pending (e.g. re-submission), do not add it ++ if (test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) { ++ spin_unlock(&ptl->pending.lock); ++ return; ++ } ++ ++ atomic_inc(&ptl->pending.count); ++ ssh_packet_get(packet); ++ list_add_tail(&packet->pending_node, &ptl->pending.head); ++ ++ spin_unlock(&ptl->pending.lock); ++} ++ ++static void ssh_ptl_pending_remove(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ bool remove; ++ ++ spin_lock(&ptl->pending.lock); ++ ++ remove = test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state); ++ if (remove) { ++ list_del(&packet->pending_node); ++ atomic_dec(&ptl->pending.count); ++ } ++ ++ spin_unlock(&ptl->pending.lock); ++ ++ if (remove) ++ ssh_packet_put(packet); ++} ++ ++ ++static void __ssh_ptl_complete(struct ssh_packet *p, int status) ++{ ++ struct ssh_ptl *ptl = READ_ONCE(p->ptl); ++ ++ trace_ssam_packet_complete(p, status); ++ ++ ptl_dbg_cond(ptl, "ptl: completing packet %p\n", p); ++ if (p->ops->complete) ++ p->ops->complete(p, status); ++} ++ ++static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status) ++{ ++ /* ++ * A call to this function should in general be preceeded by ++ * set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->flags) to avoid re-adding the ++ * packet to the structures it's going to be removed from. 
++ * ++ * The set_bit call does not need explicit memory barriers as the ++ * implicit barrier of the test_and_set_bit call below ensure that the ++ * flag is visible before we actually attempt to remove the packet. ++ */ ++ ++ if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) ++ return; ++ ++ ssh_ptl_queue_remove(p); ++ ssh_ptl_pending_remove(p); ++ ++ __ssh_ptl_complete(p, status); ++} ++ ++ ++static bool ssh_ptl_tx_can_process(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ ++ if (packet->type & SSH_PACKET_TY_FLUSH) ++ return !atomic_read(&ptl->pending.count); ++ ++ // we can alwas process non-blocking packets ++ if (!(packet->type & SSH_PACKET_TY_BLOCKING)) ++ return true; ++ ++ // if we are already waiting for this packet, send it again ++ if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) ++ return true; ++ ++ // otherwise: check if we have the capacity to send ++ return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING; ++} ++ ++static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl) ++{ ++ struct ssh_packet *packet = ERR_PTR(-ENOENT); ++ struct ssh_packet *p, *n; ++ ++ spin_lock(&ptl->queue.lock); ++ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) { ++ /* ++ * If we are cancelling or completing this packet, ignore it. ++ * It's going to be removed from this queue shortly. ++ */ ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) { ++ spin_unlock(&ptl->queue.lock); ++ continue; ++ } ++ ++ /* ++ * Packets should be ordered non-blocking/to-be-resent first. ++ * If we cannot process this packet, assume that we can't ++ * process any following packet either and abort. ++ */ ++ if (!ssh_ptl_tx_can_process(p)) { ++ spin_unlock(&ptl->queue.lock); ++ packet = ERR_PTR(-EBUSY); ++ break; ++ } ++ ++ /* ++ * We are allowed to change the state now. Remove it from the ++ * queue and mark it as being transmitted. 
Note that we cannot ++ * add it to the set of pending packets yet, as queue locks must ++ * always be acquired before packet locks (otherwise we might ++ * run into a deadlock). ++ */ ++ ++ list_del(&p->queue_node); ++ ++ /* ++ * Ensure that the "queued" bit gets cleared after setting the ++ * "transmitting" bit to guaranteee non-zero flags. ++ */ ++ set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state); ++ ++ packet = p; ++ break; ++ } ++ spin_unlock(&ptl->queue.lock); ++ ++ return packet; ++} ++ ++static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl) ++{ ++ struct ssh_packet *p; ++ ++ p = ssh_ptl_tx_pop(ptl); ++ if (IS_ERR(p)) ++ return p; ++ ++ if (p->type & SSH_PACKET_TY_SEQUENCED) { ++ ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p); ++ ssh_ptl_pending_push(p); ++ ssh_ptl_timeout_start(p); ++ } else { ++ ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p); ++ } ++ ++ /* ++ * Update number of tries. This directly influences the priority in case ++ * the packet is re-submitted (e.g. via timeout/NAK). Note that this is ++ * the only place where we update the priority in-flight. As this runs ++ * only on the tx-thread, this read-modify-write procedure is safe. ++ */ ++ WRITE_ONCE(p->priority, READ_ONCE(p->priority) + 1); ++ ++ return p; ++} ++ ++static void ssh_ptl_tx_compl_success(struct ssh_packet *packet) ++{ ++ struct ssh_ptl *ptl = packet->ptl; ++ ++ ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet); ++ ++ /* ++ * Transition to state to "transmitted". Ensure that the flags never get ++ * zero with barrier. 
++ */ ++ set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); ++ ++ // if the packet is unsequenced, we're done: lock and complete ++ if (!(packet->type & SSH_PACKET_TY_SEQUENCED)) { ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); ++ ssh_ptl_remove_and_complete(packet, 0); ++ } ++ ++ /* ++ * Notify that a packet transmission has finished. In general we're only ++ * waiting for one packet (if any), so wake_up_all should be fine. ++ */ ++ wake_up_all(&ptl->tx.packet_wq); ++} ++ ++static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status) ++{ ++ /* ++ * Transmission failure: Lock the packet and try to complete it. Ensure ++ * that the flags never get zero with barrier. ++ */ ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state); ++ ++ ptl_err(packet->ptl, "ptl: transmission error: %d\n", status); ++ ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet); ++ ++ ssh_ptl_remove_and_complete(packet, status); ++ ++ /* ++ * Notify that a packet transmission has finished. In general we're only ++ * waiting for one packet (if any), so wake_up_all should be fine. 
++ */ ++ wake_up_all(&packet->ptl->tx.packet_wq); ++} ++ ++static void ssh_ptl_tx_threadfn_wait(struct ssh_ptl *ptl) ++{ ++ wait_event_interruptible(ptl->tx.thread_wq, ++ READ_ONCE(ptl->tx.thread_signal) || kthread_should_stop()); ++ WRITE_ONCE(ptl->tx.thread_signal, false); ++} ++ ++static int ssh_ptl_tx_threadfn(void *data) ++{ ++ struct ssh_ptl *ptl = data; ++ ++ while (!kthread_should_stop()) { ++ unsigned char *buf; ++ bool drop = false; ++ size_t len = 0; ++ int status = 0; ++ ++ // if we don't have a packet, get the next and add it to pending ++ if (IS_ERR_OR_NULL(ptl->tx.packet)) { ++ ptl->tx.packet = ssh_ptl_tx_next(ptl); ++ ptl->tx.offset = 0; ++ ++ // if no packet is available, we are done ++ if (IS_ERR(ptl->tx.packet)) { ++ ssh_ptl_tx_threadfn_wait(ptl); ++ continue; ++ } ++ } ++ ++ // error injection: drop packet to simulate transmission problem ++ if (ptl->tx.offset == 0) ++ drop = ssh_ptl_should_drop_packet(ptl->tx.packet); ++ ++ // error injection: simulate invalid packet data ++ if (ptl->tx.offset == 0 && !drop) ++ ssh_ptl_tx_inject_invalid_data(ptl->tx.packet); ++ ++ // flush-packets don't have any data ++ if (likely(ptl->tx.packet->data && !drop)) { ++ buf = ptl->tx.packet->data + ptl->tx.offset; ++ len = ptl->tx.packet->data_length - ptl->tx.offset; ++ ++ ptl_dbg(ptl, "tx: sending data (length: %zu)\n", len); ++ print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1, ++ buf, len, false); ++ ++ status = ssh_ptl_write_buf(ptl, ptl->tx.packet, buf, len); ++ } ++ ++ if (status < 0) { ++ // complete packet with error ++ ssh_ptl_tx_compl_error(ptl->tx.packet, status); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ ++ } else if (status == len) { ++ // complete packet and/or mark as transmitted ++ ssh_ptl_tx_compl_success(ptl->tx.packet); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ ++ } else { // need more buffer space ++ ptl->tx.offset += status; ++ ssh_ptl_tx_threadfn_wait(ptl); ++ } ++ } ++ ++ // cancel active 
packet before we actually stop ++ if (!IS_ERR_OR_NULL(ptl->tx.packet)) { ++ ssh_ptl_tx_compl_error(ptl->tx.packet, -ESHUTDOWN); ++ ssh_packet_put(ptl->tx.packet); ++ ptl->tx.packet = NULL; ++ } ++ ++ return 0; ++} ++ ++static inline void ssh_ptl_tx_wakeup(struct ssh_ptl *ptl, bool force) ++{ ++ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) ++ return; ++ ++ if (force || atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING) { ++ WRITE_ONCE(ptl->tx.thread_signal, true); ++ smp_mb__after_atomic(); ++ wake_up(&ptl->tx.thread_wq); ++ } ++} ++ ++static int ssh_ptl_tx_start(struct ssh_ptl *ptl) ++{ ++ ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "surface-sh-tx"); ++ if (IS_ERR(ptl->tx.thread)) ++ return PTR_ERR(ptl->tx.thread); ++ ++ return 0; ++} ++ ++static int ssh_ptl_tx_stop(struct ssh_ptl *ptl) ++{ ++ int status = 0; ++ ++ if (ptl->tx.thread) { ++ status = kthread_stop(ptl->tx.thread); ++ ptl->tx.thread = NULL; ++ } ++ ++ return status; ++} ++ ++ ++static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id) ++{ ++ struct ssh_packet *packet = ERR_PTR(-ENOENT); ++ struct ssh_packet *p, *n; ++ ++ spin_lock(&ptl->pending.lock); ++ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) { ++ /* ++ * We generally expect packets to be in order, so first packet ++ * to be added to pending is first to be sent, is first to be ++ * ACKed. ++ */ ++ if (unlikely(ssh_packet_get_seq(p) != seq_id)) ++ continue; ++ ++ /* ++ * In case we receive an ACK while handling a transmission error ++ * completion. The packet will be removed shortly. ++ */ ++ if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) { ++ packet = ERR_PTR(-EPERM); ++ break; ++ } ++ ++ /* ++ * Mark packet as ACKed and remove it from pending. Ensure that ++ * the flags never get zero with barrier. 
++ */ ++ set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state); ++ ++ atomic_dec(&ptl->pending.count); ++ list_del(&p->pending_node); ++ packet = p; ++ ++ break; ++ } ++ spin_unlock(&ptl->pending.lock); ++ ++ return packet; ++} ++ ++static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet) ++{ ++ wait_event(packet->ptl->tx.packet_wq, ++ test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ++ || test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state)); ++} ++ ++static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq) ++{ ++ struct ssh_packet *p; ++ int status = 0; ++ ++ p = ssh_ptl_ack_pop(ptl, seq); ++ if (IS_ERR(p)) { ++ if (PTR_ERR(p) == -ENOENT) { ++ /* ++ * The packet has not been found in the set of pending ++ * packets. ++ */ ++ ptl_warn(ptl, "ptl: received ACK for non-pending" ++ " packet\n"); ++ } else { ++ /* ++ * The packet is pending, but we are not allowed to take ++ * it because it has been locked. ++ */ ++ } ++ return; ++ } ++ ++ ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p); ++ ++ /* ++ * It is possible that the packet has been transmitted, but the state ++ * has not been updated from "transmitting" to "transmitted" yet. ++ * In that case, we need to wait for this transition to occur in order ++ * to determine between success or failure. ++ */ ++ if (test_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state)) ++ ssh_ptl_wait_until_transmitted(p); ++ ++ /* ++ * The packet will already be locked in case of a transmission error or ++ * cancellation. Let the transmitter or cancellation issuer complete the ++ * packet. 
++ */ ++ if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) { ++ ssh_packet_put(p); ++ return; ++ } ++ ++ if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state))) { ++ ptl_err(ptl, "ptl: received ACK before packet had been fully" ++ " transmitted\n"); ++ status = -EREMOTEIO; ++ } ++ ++ ssh_ptl_remove_and_complete(p, status); ++ ssh_packet_put(p); ++ ++ ssh_ptl_tx_wakeup(ptl, false); ++} ++ ++ ++static int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *packet) ++{ ++ int status; ++ ++ trace_ssam_packet_submit(packet); ++ ++ // validate packet fields ++ if (packet->type & SSH_PACKET_TY_FLUSH) { ++ if (packet->data || (packet->type & SSH_PACKET_TY_SEQUENCED)) ++ return -EINVAL; ++ } else if (!packet->data) { ++ return -EINVAL; ++ } ++ ++ /* ++ * This function is currently not intended for re-submission. The ptl ++ * reference only gets set on the first submission. After the first ++ * submission, it has to be read-only. ++ * ++ * Use cmpxchg to ensure safety with regards to ssh_ptl_cancel and ++ * re-entry, where we can't guarantee that the packet has been submitted ++ * yet. ++ * ++ * The implicit barrier of cmpxchg is paired with barrier in ++ * ssh_ptl_cancel to guarantee cancelation in case the packet has never ++ * been submitted or is currently being submitted. 
++ */ ++ if (cmpxchg(&packet->ptl, NULL, ptl) != NULL) ++ return -EALREADY; ++ ++ status = ssh_ptl_queue_push(packet); ++ if (status) ++ return status; ++ ++ ssh_ptl_tx_wakeup(ptl, !(packet->type & SSH_PACKET_TY_BLOCKING)); ++ return 0; ++} ++ ++static void __ssh_ptl_resubmit(struct ssh_packet *packet) ++{ ++ struct list_head *head; ++ ++ trace_ssam_packet_resubmit(packet); ++ ++ spin_lock(&packet->ptl->queue.lock); ++ ++ // if this packet has already been queued, do not add it ++ if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) { ++ spin_unlock(&packet->ptl->queue.lock); ++ return; ++ } ++ ++ // find first node with lower priority ++ head = __ssh_ptl_queue_find_entrypoint(packet); ++ ++ WRITE_ONCE(packet->timestamp, KTIME_MAX); ++ smp_mb__after_atomic(); ++ ++ // add packet ++ ssh_packet_get(packet); ++ list_add_tail(&packet->queue_node, head); ++ ++ spin_unlock(&packet->ptl->queue.lock); ++} ++ ++static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl) ++{ ++ struct ssh_packet *p; ++ bool resub = false; ++ u8 try; ++ ++ /* ++ * Note: We deliberately do not remove/attempt to cancel and complete ++ * packets that are out of tries in this function. The packet will be ++ * eventually canceled and completed by the timeout. Removing the packet ++ * here could lead to overly eager cancelation if the packet has not ++ * been re-transmitted yet but the tries-counter already updated (i.e. ++ * ssh_ptl_tx_next removed the packet from the queue and updated the ++ * counter, but re-transmission for the last try has not actually ++ * started yet).
++ */ ++ ++ spin_lock(&ptl->pending.lock); ++ ++ // re-queue all pending packets ++ list_for_each_entry(p, &ptl->pending.head, pending_node) { ++ // avoid further transitions if locked ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) ++ continue; ++ ++ // do not re-schedule if packet is out of tries ++ try = ssh_packet_priority_get_try(READ_ONCE(p->priority)); ++ if (try >= SSH_PTL_MAX_PACKET_TRIES) ++ continue; ++ ++ resub = true; ++ __ssh_ptl_resubmit(p); ++ } ++ ++ spin_unlock(&ptl->pending.lock); ++ ++ ssh_ptl_tx_wakeup(ptl, resub); ++} ++ ++static void ssh_ptl_cancel(struct ssh_packet *p) ++{ ++ if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state)) ++ return; ++ ++ trace_ssam_packet_cancel(p); ++ ++ /* ++ * Lock packet and commit with memory barrier. If this packet has ++ * already been locked, it's going to be removed and completed by ++ * another party, which should have precedence. ++ */ ++ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) ++ return; ++ ++ /* ++ * By marking the packet as locked and employing the implicit memory ++ * barrier of test_and_set_bit, we have guaranteed that, at this point, ++ * the packet cannot be added to the queue any more. ++ * ++ * In case the packet has never been submitted, packet->ptl is NULL. If ++ * the packet is currently being submitted, packet->ptl may be NULL or ++ * non-NULL. Due to marking the packet as locked above and committing with ++ * the memory barrier, we have guaranteed that, if packet->ptl is NULL, ++ * the packet will never be added to the queue. If packet->ptl is ++ * non-NULL, we don't have any guarantees.
++ */ ++ ++ if (READ_ONCE(p->ptl)) { ++ ssh_ptl_remove_and_complete(p, -ECANCELED); ++ ssh_ptl_tx_wakeup(p->ptl, false); ++ } else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) { ++ __ssh_ptl_complete(p, -ECANCELED); ++ } ++} ++ ++ ++static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout) ++{ ++ ktime_t timestamp = READ_ONCE(p->timestamp); ++ ++ if (timestamp != KTIME_MAX) ++ return ktime_add(timestamp, timeout); ++ else ++ return KTIME_MAX; ++} ++ ++static void ssh_ptl_timeout_reap(struct work_struct *work) ++{ ++ struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work); ++ struct ssh_packet *p, *n; ++ LIST_HEAD(claimed); ++ ktime_t now = ktime_get_coarse_boottime(); ++ ktime_t timeout = ptl->rtx_timeout.timeout; ++ ktime_t next = KTIME_MAX; ++ bool resub = false; ++ ++ trace_ssam_ptl_timeout_reap("pending", atomic_read(&ptl->pending.count)); ++ ++ /* ++ * Mark reaper as "not pending". This is done before checking any ++ * packets to avoid lost-update type problems. ++ */ ++ WRITE_ONCE(ptl->rtx_timeout.expires, KTIME_MAX); ++ smp_mb__after_atomic(); ++ ++ spin_lock(&ptl->pending.lock); ++ ++ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) { ++ ktime_t expires = ssh_packet_get_expiration(p, timeout); ++ u8 try; ++ ++ /* ++ * Check if the timeout hasn't expired yet. Find out next ++ * expiration date to be handled after this run. ++ */ ++ if (ktime_after(expires, now)) { ++ next = ktime_before(expires, next) ? 
expires : next; ++ continue; ++ } ++ ++ // avoid further transitions if locked ++ if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) ++ continue; ++ ++ trace_ssam_packet_timeout(p); ++ ++ // check if we still have some tries left ++ try = ssh_packet_priority_get_try(READ_ONCE(p->priority)); ++ if (likely(try < SSH_PTL_MAX_PACKET_TRIES)) { ++ resub = true; ++ __ssh_ptl_resubmit(p); ++ continue; ++ } ++ ++ // no more tries left: cancel the packet ++ ++ // if someone else has locked the packet already, don't use it ++ if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) ++ continue; ++ ++ /* ++ * We have now marked the packet as locked. Thus it cannot be ++ * added to the pending list again after we've removed it here. ++ * We can therefore re-use the pending_node of this packet ++ * temporarily. ++ */ ++ ++ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state); ++ ++ atomic_dec(&ptl->pending.count); ++ list_del(&p->pending_node); ++ ++ list_add_tail(&p->pending_node, &claimed); ++ } ++ ++ spin_unlock(&ptl->pending.lock); ++ ++ // cancel and complete the packet ++ list_for_each_entry_safe(p, n, &claimed, pending_node) { ++ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) { ++ ssh_ptl_queue_remove(p); ++ __ssh_ptl_complete(p, -ETIMEDOUT); ++ } ++ ++ // drop the reference we've obtained by removing it from pending ++ list_del(&p->pending_node); ++ ssh_packet_put(p); ++ } ++ ++ // ensure that reaper doesn't run again immediately ++ next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION)); ++ if (next != KTIME_MAX) ++ ssh_ptl_timeout_reaper_mod(ptl, now, next); ++ ++ // force-wakeup to properly handle re-transmits if we've re-submitted ++ ssh_ptl_tx_wakeup(ptl, resub); ++} ++ ++ ++static bool ssh_ptl_rx_blacklist_check(struct ssh_ptl *ptl, u8 seq) ++{ ++ int i; ++ ++ // check if SEQ is blacklisted ++ for (i = 0; i < ARRAY_SIZE(ptl->rx.blacklist.seqs); i++) { ++ if (likely(ptl->rx.blacklist.seqs[i] != seq)) ++ continue; ++ ++ ptl_dbg(ptl, "ptl: 
ignoring repeated data packet\n"); ++ return true; ++ } ++ ++ // update blacklist ++ ptl->rx.blacklist.seqs[ptl->rx.blacklist.offset] = seq; ++ ptl->rx.blacklist.offset = (ptl->rx.blacklist.offset + 1) ++ % ARRAY_SIZE(ptl->rx.blacklist.seqs); ++ ++ return false; ++} ++ ++static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl, ++ const struct ssh_frame *frame, ++ const struct sshp_span *payload) ++{ ++ if (ssh_ptl_rx_blacklist_check(ptl, frame->seq)) ++ return; ++ ++ ptl->ops.data_received(ptl, payload); ++} ++ ++static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq) ++{ ++ struct ssh_packet_args args; ++ struct ssh_packet *packet; ++ struct msgbuf msgb; ++ ++ args.type = 0; ++ args.priority = SSH_PACKET_PRIORITY(ACK, 0); ++ args.ops = &ssh_ptl_ctrl_packet_ops; ++ ++ packet = ptl_alloc_ctrl_packet(ptl, &args, GFP_KERNEL); ++ if (!packet) { ++ ptl_err(ptl, "ptl: failed to allocate ACK packet\n"); ++ return; ++ } ++ ++ msgb_init(&msgb, packet->data, packet->data_length); ++ msgb_push_ack(&msgb, seq); ++ packet->data_length = msgb_bytes_used(&msgb); ++ ++ ssh_ptl_submit(ptl, packet); ++ ssh_packet_put(packet); ++} ++ ++static void ssh_ptl_send_nak(struct ssh_ptl *ptl) ++{ ++ struct ssh_packet_args args; ++ struct ssh_packet *packet; ++ struct msgbuf msgb; ++ ++ args.type = 0; ++ args.priority = SSH_PACKET_PRIORITY(NAK, 0); ++ args.ops = &ssh_ptl_ctrl_packet_ops; ++ ++ packet = ptl_alloc_ctrl_packet(ptl, &args, GFP_KERNEL); ++ if (!packet) { ++ ptl_err(ptl, "ptl: failed to allocate NAK packet\n"); ++ return; ++ } ++ ++ msgb_init(&msgb, packet->data, packet->data_length); ++ msgb_push_nak(&msgb); ++ packet->data_length = msgb_bytes_used(&msgb); ++ ++ ssh_ptl_submit(ptl, packet); ++ ssh_packet_put(packet); ++} ++ ++static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct sshp_span *source) ++{ ++ struct ssh_frame *frame; ++ struct sshp_span payload; ++ struct sshp_span aligned; ++ bool syn_found; ++ int status; ++ ++ // error injection: modify data to simulate corrupt 
SYN bytes ++ ssh_ptl_rx_inject_invalid_syn(ptl, source); ++ ++ // find SYN ++ syn_found = sshp_find_syn(source, &aligned); ++ ++ if (unlikely(aligned.ptr - source->ptr) > 0) { ++ ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n"); ++ ++ /* ++ * Notes: ++ * - This might send multiple NAKs in case the communication ++ * starts with an invalid SYN and is broken down into multiple ++ * pieces. This should generally be handled fine, we just ++ * might receive duplicate data in this case, which is ++ * detected when handling data frames. ++ * - This path will also be executed on invalid CRCs: When an ++ * invalid CRC is encountered, the code below will skip data ++ * until directly after the SYN. This causes the search for ++ * the next SYN, which is generally not placed directly after ++ * the last one. ++ */ ++ ssh_ptl_send_nak(ptl); ++ } ++ ++ if (unlikely(!syn_found)) ++ return aligned.ptr - source->ptr; ++ ++ // error injection: modify data to simulate corruption ++ ssh_ptl_rx_inject_invalid_data(ptl, &aligned); ++ ++ // parse and validate frame ++ status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload, ++ SSH_PTL_RX_BUF_LEN); ++ if (status) // invalid frame: skip to next syn ++ return aligned.ptr - source->ptr + sizeof(u16); ++ if (!frame) // not enough data ++ return aligned.ptr - source->ptr; ++ ++ trace_ssam_rx_frame_received(frame); ++ ++ switch (frame->type) { ++ case SSH_FRAME_TYPE_ACK: ++ ssh_ptl_acknowledge(ptl, frame->seq); ++ break; ++ ++ case SSH_FRAME_TYPE_NAK: ++ ssh_ptl_resubmit_pending(ptl); ++ break; ++ ++ case SSH_FRAME_TYPE_DATA_SEQ: ++ ssh_ptl_send_ack(ptl, frame->seq); ++ /* fallthrough */ ++ ++ case SSH_FRAME_TYPE_DATA_NSQ: ++ ssh_ptl_rx_dataframe(ptl, frame, &payload); ++ break; ++ ++ default: ++ ptl_warn(ptl, "ptl: received frame with unknown type 0x%02x\n", ++ frame->type); ++ break; ++ } ++ ++ return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(frame->len); ++} ++ ++static int ssh_ptl_rx_threadfn(void *data)
++{ ++ struct ssh_ptl *ptl = data; ++ ++ while (true) { ++ struct sshp_span span; ++ size_t offs = 0; ++ size_t n; ++ ++ wait_event_interruptible(ptl->rx.wq, ++ !kfifo_is_empty(&ptl->rx.fifo) ++ || kthread_should_stop()); ++ if (kthread_should_stop()) ++ break; ++ ++ // copy from fifo to evaluation buffer ++ n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo); ++ ++ ptl_dbg(ptl, "rx: received data (size: %zu)\n", n); ++ print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1, ++ ptl->rx.buf.ptr + ptl->rx.buf.len - n, ++ n, false); ++ ++ // parse until we need more bytes or buffer is empty ++ while (offs < ptl->rx.buf.len) { ++ sshp_buf_span_from(&ptl->rx.buf, offs, &span); ++ n = ssh_ptl_rx_eval(ptl, &span); ++ if (n == 0) ++ break; // need more bytes ++ ++ offs += n; ++ } ++ ++ // throw away the evaluated parts ++ sshp_buf_drop(&ptl->rx.buf, offs); ++ } ++ ++ return 0; ++} ++ ++static inline void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl) ++{ ++ wake_up(&ptl->rx.wq); ++} ++ ++static int ssh_ptl_rx_start(struct ssh_ptl *ptl) ++{ ++ if (ptl->rx.thread) ++ return 0; ++ ++ ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl, "surface-sh-rx"); ++ if (IS_ERR(ptl->rx.thread)) ++ return PTR_ERR(ptl->rx.thread); ++ ++ return 0; ++} ++ ++static int ssh_ptl_rx_stop(struct ssh_ptl *ptl) ++{ ++ int status = 0; ++ ++ if (ptl->rx.thread) { ++ status = kthread_stop(ptl->rx.thread); ++ ptl->rx.thread = NULL; ++ } ++ ++ return status; ++} ++ ++static int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n) ++{ ++ int used; ++ ++ if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state)) ++ return used; ++ ++ used = kfifo_in(&ptl->rx.fifo, buf, n); ++ if (used) ++ ssh_ptl_rx_wakeup(ptl); ++ ++ return used; ++} ++ ++ ++struct ssh_flush_packet { ++ struct ssh_packet base; ++ struct completion completion; ++ int status; ++}; ++ ++static void ssh_ptl_flush_complete(struct ssh_packet *p, int status) ++{ ++ struct ssh_flush_packet *packet; ++ ++ packet = container_of(p, struct 
ssh_flush_packet, base); ++ packet->status = status; ++} ++ ++static void ssh_ptl_flush_release(struct ssh_packet *p) ++{ ++ struct ssh_flush_packet *packet; ++ ++ packet = container_of(p, struct ssh_flush_packet, base); ++ complete_all(&packet->completion); ++} ++ ++static const struct ssh_packet_ops ssh_flush_packet_ops = { ++ .complete = ssh_ptl_flush_complete, ++ .release = ssh_ptl_flush_release, ++}; ++ ++/** ++ * ssh_ptl_flush - flush the packet transmission layer ++ * @ptl: packet transmission layer ++ * @timeout: timeout for the flush operation in jiffies ++ * ++ * Queue a special flush-packet and wait for its completion. This packet will ++ * be completed after all other currently queued and pending packets have been ++ * completed. Flushing guarantees that all previously submitted data packets ++ * have been fully completed before this call returns. Additionally, flushing ++ * blocks execution of all later submitted data packets until the flush has been ++ * completed. ++ * ++ * Control (i.e. ACK/NAK) packets that have been submitted after this call will ++ * be placed before the flush packet in the queue, as long as the flush-packet ++ * has not been chosen for processing yet. ++ * ++ * Flushing, even when no new data packets are submitted after this call, does ++ * not guarantee that no more packets are scheduled. For example, incoming ++ * messages can prompt automated submission of ACK or NAK type packets. If this ++ * happens while the flush-packet is being processed (i.e. after it has been ++ * taken from the queue), such packets may still be queued after this function ++ * returns. ++ * ++ * Return: Zero on success, -ETIMEDOUT if the flush timed out and has been ++ * canceled as a result of the timeout, or -ESHUTDOWN if the packet transmission ++ * layer has been shut down before this call. May also return -EINTR if the ++ * packet transmission has been interrupted.
++ */ ++static int ssh_ptl_flush(struct ssh_ptl *ptl, unsigned long timeout) ++{ ++ struct ssh_flush_packet packet; ++ struct ssh_packet_args args; ++ int status; ++ ++ args.type = SSH_PACKET_TY_FLUSH | SSH_PACKET_TY_BLOCKING; ++ args.priority = SSH_PACKET_PRIORITY(FLUSH, 0); ++ args.ops = &ssh_flush_packet_ops; ++ ++ ssh_packet_init(&packet.base, &args); ++ init_completion(&packet.completion); ++ ++ status = ssh_ptl_submit(ptl, &packet.base); ++ if (status) ++ return status; ++ ++ ssh_packet_put(&packet.base); ++ ++ if (wait_for_completion_timeout(&packet.completion, timeout)) ++ return 0; ++ ++ ssh_ptl_cancel(&packet.base); ++ wait_for_completion(&packet.completion); ++ ++ WARN_ON(packet.status != 0 && packet.status != -ECANCELED ++ && packet.status != -ESHUTDOWN && packet.status != -EINTR); ++ ++ return packet.status == -ECANCELED ? -ETIMEDOUT : status; ++} ++ ++/** ++ * ssh_ptl_shutdown - shut down the packet transmission layer ++ * @ptl: packet transmission layer ++ * ++ * Shuts down the packet transmission layer, removing and canceling all queued ++ * and pending packets. Packets canceled by this operation will be completed ++ * with -ESHUTDOWN as status. ++ * ++ * As a result of this function, the transmission layer will be marked as shut ++ * down. Submission of packets after the transmission layer has been shut down ++ * will fail with -ESHUTDOWN. 
++ */ ++static void ssh_ptl_shutdown(struct ssh_ptl *ptl) ++{ ++ LIST_HEAD(complete_q); ++ LIST_HEAD(complete_p); ++ struct ssh_packet *p, *n; ++ int status; ++ ++ // ensure that no new packets (including ACK/NAK) can be submitted ++ set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state); ++ smp_mb__after_atomic(); ++ ++ status = ssh_ptl_rx_stop(ptl); ++ if (status) ++ ptl_err(ptl, "ptl: failed to stop receiver thread\n"); ++ ++ status = ssh_ptl_tx_stop(ptl); ++ if (status) ++ ptl_err(ptl, "ptl: failed to stop transmitter thread\n"); ++ ++ cancel_delayed_work_sync(&ptl->rtx_timeout.reaper); ++ ++ /* ++ * At this point, all threads have been stopped. This means that the ++ * only references to packets from inside the system are in the queue ++ * and pending set. ++ * ++ * Note: We still need locks here because someone could still be ++ * cancelling packets. ++ * ++ * Note 2: We can re-use queue_node (or pending_node) if we mark the ++ * packet as locked and then remove it from the queue (or pending set ++ * respectively). Marking the packet as locked avoids re-queueing ++ * (which should already be prevented by having stopped the threads...) ++ * and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a ++ * new list via other threads (e.g. cancellation). ++ * ++ * Note 3: There may be overlap between complete_p and complete_q. ++ * This is handled via test_and_set_bit on the "completed" flag ++ * (also handles cancelation).
++ */ ++ ++ // mark queued packets as locked and move them to complete_q ++ spin_lock(&ptl->queue.lock); ++ list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) { ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state); ++ ++ list_del(&p->queue_node); ++ list_add_tail(&p->queue_node, &complete_q); ++ } ++ spin_unlock(&ptl->queue.lock); ++ ++ // mark pending packets as locked and move them to complete_p ++ spin_lock(&ptl->pending.lock); ++ list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) { ++ set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state); ++ ++ list_del(&p->pending_node); ++ list_add_tail(&p->pending_node, &complete_q); ++ } ++ atomic_set(&ptl->pending.count, 0); ++ spin_unlock(&ptl->pending.lock); ++ ++ // complete and drop packets on complete_q ++ list_for_each_entry(p, &complete_q, queue_node) { ++ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) ++ __ssh_ptl_complete(p, -ESHUTDOWN); ++ ++ ssh_packet_put(p); ++ } ++ ++ // complete and drop packets on complete_p ++ list_for_each_entry(p, &complete_p, pending_node) { ++ if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) ++ __ssh_ptl_complete(p, -ESHUTDOWN); ++ ++ ssh_packet_put(p); ++ } ++ ++ /* ++ * At this point we have guaranteed that the system doesn't reference ++ * any packets any more. 
++ */ ++} ++ ++static inline struct device *ssh_ptl_get_device(struct ssh_ptl *ptl) ++{ ++ return &ptl->serdev->dev; ++} ++ ++static int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev, ++ struct ssh_ptl_ops *ops) ++{ ++ int i, status; ++ ++ ptl->serdev = serdev; ++ ptl->state = 0; ++ ++ spin_lock_init(&ptl->queue.lock); ++ INIT_LIST_HEAD(&ptl->queue.head); ++ ++ spin_lock_init(&ptl->pending.lock); ++ INIT_LIST_HEAD(&ptl->pending.head); ++ atomic_set_release(&ptl->pending.count, 0); ++ ++ ptl->tx.thread = NULL; ++ ptl->tx.thread_signal = false; ++ ptl->tx.packet = NULL; ++ ptl->tx.offset = 0; ++ init_waitqueue_head(&ptl->tx.thread_wq); ++ init_waitqueue_head(&ptl->tx.packet_wq); ++ ++ ptl->rx.thread = NULL; ++ init_waitqueue_head(&ptl->rx.wq); ++ ++ ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT; ++ ptl->rtx_timeout.expires = KTIME_MAX; ++ INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap); ++ ++ ptl->ops = *ops; ++ ++ // initialize SEQ blacklist with invalid sequence IDs ++ for (i = 0; i < ARRAY_SIZE(ptl->rx.blacklist.seqs); i++) ++ ptl->rx.blacklist.seqs[i] = 0xFFFF; ++ ptl->rx.blacklist.offset = 0; ++ ++ status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL); ++ if (status) ++ return status; ++ ++ status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL); ++ if (status) ++ kfifo_free(&ptl->rx.fifo); ++ ++ return status; ++} ++ ++static void ssh_ptl_destroy(struct ssh_ptl *ptl) ++{ ++ kfifo_free(&ptl->rx.fifo); ++ sshp_buf_free(&ptl->rx.buf); ++} ++ ++ ++/* -- Request transport layer (rtl). 
---------------------------------------- */ ++ ++#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(1000) ++#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50)) ++ ++#define SSH_RTL_MAX_PENDING 3 ++ ++ ++enum ssh_rtl_state_flags { ++ SSH_RTL_SF_SHUTDOWN_BIT, ++}; ++ ++struct ssh_rtl_ops { ++ void (*handle_event)(struct ssh_rtl *rtl, const struct ssh_command *cmd, ++ const struct sshp_span *data); ++}; ++ ++struct ssh_rtl { ++ struct ssh_ptl ptl; ++ unsigned long state; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ } queue; ++ ++ struct { ++ spinlock_t lock; ++ struct list_head head; ++ atomic_t count; ++ } pending; ++ ++ struct { ++ struct work_struct work; ++ } tx; ++ ++ struct { ++ ktime_t timeout; ++ ktime_t expires; ++ struct delayed_work reaper; ++ } rtx_timeout; ++ ++ struct ssh_rtl_ops ops; ++}; ++ ++ ++#define rtl_dbg(r, fmt, ...) ptl_dbg(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_info(p, fmt, ...) ptl_info(&(p)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_warn(r, fmt, ...) ptl_warn(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_err(r, fmt, ...) ptl_err(&(r)->ptl, fmt, ##__VA_ARGS__) ++#define rtl_dbg_cond(r, fmt, ...) __ssam_prcond(rtl_dbg, r, fmt, ##__VA_ARGS__) ++ ++#define to_ssh_rtl(ptr, member) \ ++ container_of(ptr, struct ssh_rtl, member) ++ ++#define to_ssh_request(ptr, member) \ ++ container_of(ptr, struct ssh_request, member) ++ ++ ++/** ++ * ssh_rtl_should_drop_response - error injection hook to drop request responses ++ * ++ * Useful to cause request transmission timeouts in the driver by dropping the ++ * response to a request. 
++ */ ++static noinline_if_inject bool ssh_rtl_should_drop_response(void) ++{ ++ return false; ++} ++// not supported on 4.19 ++// ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE); ++ ++ ++static inline void ssh_request_get(struct ssh_request *rqst) ++{ ++ ssh_packet_get(&rqst->packet); ++} ++ ++static inline void ssh_request_put(struct ssh_request *rqst) ++{ ++ ssh_packet_put(&rqst->packet); ++} ++ ++ ++static inline u16 ssh_request_get_rqid(struct ssh_request *rqst) ++{ ++ return get_unaligned_le16(rqst->packet.data ++ + SSH_MSGOFFSET_COMMAND(rqid)); ++} ++ ++static inline u32 ssh_request_get_rqid_safe(struct ssh_request *rqst) ++{ ++ if (!rqst->packet.data) ++ return -1; ++ ++ return ssh_request_get_rqid(rqst); ++} ++ ++ ++static void ssh_rtl_queue_remove(struct ssh_request *rqst) ++{ ++ bool remove; ++ ++ spin_lock(&rqst->rtl->queue.lock); ++ ++ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state); ++ if (remove) ++ list_del(&rqst->node); ++ ++ spin_unlock(&rqst->rtl->queue.lock); ++ ++ if (remove) ++ ssh_request_put(rqst); ++} ++ ++static void ssh_rtl_pending_remove(struct ssh_request *rqst) ++{ ++ bool remove; ++ ++ spin_lock(&rqst->rtl->pending.lock); ++ ++ remove = test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state); ++ if (remove) { ++ atomic_dec(&rqst->rtl->pending.count); ++ list_del(&rqst->node); ++ } ++ ++ spin_unlock(&rqst->rtl->pending.lock); ++ ++ if (remove) ++ ssh_request_put(rqst); ++} ++ ++ ++static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status) ++{ ++ struct ssh_rtl *rtl = READ_ONCE(rqst->rtl); ++ ++ trace_ssam_request_complete(rqst, status); ++ ++ // rqst->rtl may not be set if we're cancelling before submitting ++ rtl_dbg_cond(rtl, "rtl: completing request (rqid: 0x%04x," ++ " status: %d)\n", ssh_request_get_rqid_safe(rqst), status); ++ ++ rqst->ops->complete(rqst, NULL, NULL, status); ++} ++ ++static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst, ++ const struct ssh_command 
*cmd, ++ const struct sshp_span *data) ++{ ++ trace_ssam_request_complete(rqst, 0); ++ ++ rtl_dbg(rqst->rtl, "rtl: completing request with response" ++ " (rqid: 0x%04x)\n", ssh_request_get_rqid(rqst)); ++ ++ rqst->ops->complete(rqst, cmd, data, 0); ++} ++ ++ ++static bool ssh_rtl_tx_can_process(struct ssh_request *rqst) ++{ ++ if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state)) ++ return !atomic_read(&rqst->rtl->pending.count); ++ ++ return atomic_read(&rqst->rtl->pending.count) < SSH_RTL_MAX_PENDING; ++} ++ ++static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl) ++{ ++ struct ssh_request *rqst = ERR_PTR(-ENOENT); ++ struct ssh_request *p, *n; ++ ++ spin_lock(&rtl->queue.lock); ++ ++ // find first non-locked request and remove it ++ list_for_each_entry_safe(p, n, &rtl->queue.head, node) { ++ if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state))) ++ continue; ++ ++ if (!ssh_rtl_tx_can_process(p)) { ++ rqst = ERR_PTR(-EBUSY); ++ break; ++ } ++ ++ /* ++ * Remove from queue and mark as transmitting. Ensure that the ++ * state does not get zero via memory barrier. 
++ */ ++ set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state); ++ ++ list_del(&p->node); ++ ++ rqst = p; ++ break; ++ } ++ ++ spin_unlock(&rtl->queue.lock); ++ return rqst; ++} ++ ++static int ssh_rtl_tx_pending_push(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = rqst->rtl; ++ ++ spin_lock(&rtl->pending.lock); ++ ++ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) { ++ spin_unlock(&rtl->pending.lock); ++ return -EINVAL; ++ } ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) { ++ spin_unlock(&rtl->pending.lock); ++ return -EALREADY; ++ } ++ ++ atomic_inc(&rtl->pending.count); ++ ssh_request_get(rqst); ++ list_add_tail(&rqst->node, &rtl->pending.head); ++ ++ spin_unlock(&rtl->pending.lock); ++ return 0; ++} ++ ++static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl) ++{ ++ struct ssh_request *rqst; ++ int status; ++ ++ // get and prepare next request for transmit ++ rqst = ssh_rtl_tx_next(rtl); ++ if (IS_ERR(rqst)) ++ return PTR_ERR(rqst); ++ ++ // add to/mark as pending ++ status = ssh_rtl_tx_pending_push(rqst); ++ if (status) { ++ ssh_request_put(rqst); ++ return -EAGAIN; ++ } ++ ++ // submit packet ++ status = ssh_ptl_submit(&rtl->ptl, &rqst->packet); ++ if (status == -ESHUTDOWN) { ++ /* ++ * Packet has been refused due to the packet layer shutting ++ * down. Complete it here. ++ */ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state); ++ smp_mb__after_atomic(); ++ ++ ssh_rtl_pending_remove(rqst); ++ ssh_rtl_complete_with_status(rqst, -ESHUTDOWN); ++ ++ ssh_request_put(rqst); ++ return -ESHUTDOWN; ++ ++ } else if (status) { ++ /* ++ * If submitting the packet failed and the packet layer isn't ++ * shutting down, the packet has either been submitted/queued ++ * before (-EALREADY, which cannot happen as we have guaranteed ++ * that requests cannot be re-submitted), or the packet was ++ * marked as locked (-EINVAL).
To mark the packet locked at this ++ * stage, the request, and thus the packet itself, had to have ++ * been canceled. Simply drop the reference. Cancellation itself ++ * will remove it from the set of pending requests. ++ */ ++ ++ WARN_ON(status != -EINVAL); ++ ++ ssh_request_put(rqst); ++ return -EAGAIN; ++ } ++ ++ ssh_request_put(rqst); ++ return 0; ++} ++ ++static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl) ++{ ++ bool empty; ++ ++ spin_lock(&rtl->queue.lock); ++ empty = list_empty(&rtl->queue.head); ++ spin_unlock(&rtl->queue.lock); ++ ++ return empty; ++} ++ ++static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl) ++{ ++ if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING) ++ return false; ++ ++ if (ssh_rtl_queue_empty(rtl)) ++ return false; ++ ++ return schedule_work(&rtl->tx.work); ++} ++ ++static void ssh_rtl_tx_work_fn(struct work_struct *work) ++{ ++ struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work); ++ int i, status; ++ ++ /* ++ * Try to be nice and not block the workqueue: Run a maximum of 10 ++ * tries, then re-submit if necessary. This should not be necessary, ++ * for normal execution, but guarantee it anyway. ++ */ ++ for (i = 0; i < 10; i++) { ++ status = ssh_rtl_tx_try_process_one(rtl); ++ if (status == -ENOENT || status == -EBUSY) ++ return; // no more requests to process ++ ++ if (status == -ESHUTDOWN) { ++ /* ++ * Packet system shutting down. No new packets can be ++ * transmitted. Return silently, the party initiating ++ * the shutdown should handle the rest. ++ */ ++ return; ++ } ++ ++ WARN_ON(status != 0 && status != -EAGAIN); ++ } ++ ++ // out of tries, reschedule ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst) ++{ ++ trace_ssam_request_submit(rqst); ++ ++ /* ++ * Ensure that requests expecting a response are sequenced. If this ++ * invariant ever changes, see the comment in ssh_rtl_complete on what ++ * is required to be changed in the code.
++ */ ++ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state)) ++ if (!(rqst->packet.type & SSH_PACKET_TY_SEQUENCED)) ++ return -EINVAL; ++ ++ // try to set rtl and check if this request has already been submitted ++ if (cmpxchg(&rqst->rtl, NULL, rtl) != NULL) ++ return -EALREADY; ++ ++ spin_lock(&rtl->queue.lock); ++ ++ if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) { ++ spin_unlock(&rtl->queue.lock); ++ return -ESHUTDOWN; ++ } ++ ++ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) { ++ spin_unlock(&rtl->queue.lock); ++ return -EINVAL; ++ } ++ ++ ssh_request_get(rqst); ++ set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state); ++ list_add_tail(&rqst->node, &rtl->queue.head); ++ ++ spin_unlock(&rtl->queue.lock); ++ ++ ssh_rtl_tx_schedule(rtl); ++ return 0; ++} ++ ++ ++static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now, ++ ktime_t expires) ++{ ++ unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now)); ++ ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION); ++ ktime_t old; ++ ++ // re-adjust / schedule reaper if it is above resolution delta ++ old = READ_ONCE(rtl->rtx_timeout.expires); ++ while (ktime_before(aexp, old)) ++ old = cmpxchg64(&rtl->rtx_timeout.expires, old, expires); ++ ++ // if we updated the reaper expiration, modify work timeout ++ if (old == expires) ++ mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta); ++} ++ ++static void ssh_rtl_timeout_start(struct ssh_request *rqst) ++{ ++ struct ssh_rtl *rtl = rqst->rtl; ++ ktime_t timestamp = ktime_get_coarse_boottime(); ++ ktime_t timeout = rtl->rtx_timeout.timeout; ++ ++ if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) ++ return; ++ ++ WRITE_ONCE(rqst->timestamp, timestamp); ++ smp_mb__after_atomic(); ++ ++ ssh_rtl_timeout_reaper_mod(rqst->rtl, timestamp, timestamp + timeout); ++} ++ ++ ++static void ssh_rtl_complete(struct ssh_rtl *rtl, ++ const struct ssh_command *command, ++ const struct sshp_span *command_data) ++{ ++ 
struct ssh_request *r = NULL; ++ struct ssh_request *p, *n; ++ u16 rqid = get_unaligned_le16(&command->rqid); ++ ++ trace_ssam_rx_response_received(command, command_data->len); ++ ++ /* ++ * Get request from pending based on request ID and mark it as response ++ * received and locked. ++ */ ++ spin_lock(&rtl->pending.lock); ++ list_for_each_entry_safe(p, n, &rtl->pending.head, node) { ++ // we generally expect requests to be processed in order ++ if (unlikely(ssh_request_get_rqid(p) != rqid)) ++ continue; ++ ++ // simulate response timeout ++ if (ssh_rtl_should_drop_response()) { ++ spin_unlock(&rtl->pending.lock); ++ ++ trace_ssam_ei_rx_drop_response(p); ++ rtl_info(rtl, "request error injection: " ++ "dropping response for request %p\n", ++ &p->packet); ++ return; ++ } ++ ++ /* ++ * Mark as "response received" and "locked" as we're going to ++ * complete it. Ensure that the state doesn't get zero by ++ * employing a memory barrier. ++ */ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state); ++ set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state); ++ ++ atomic_dec(&rtl->pending.count); ++ list_del(&p->node); ++ ++ r = p; ++ break; ++ } ++ spin_unlock(&rtl->pending.lock); ++ ++ if (!r) { ++ rtl_warn(rtl, "rtl: dropping unexpected command message" ++ " (rqid = 0x%04x)\n", rqid); ++ return; ++ } ++ ++ // if the request hasn't been completed yet, we will do this now ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) { ++ ssh_request_put(r); ++ ssh_rtl_tx_schedule(rtl); ++ return; ++ } ++ ++ /* ++ * Make sure the request has been transmitted. In case of a sequenced ++ * request, we are guaranteed that the completion callback will run on ++ * the receiver thread directly when the ACK for the packet has been ++ * received. Similarly, this function is guaranteed to run on the ++ * receiver thread. 
Thus we are guaranteed that if the packet has been ++ * successfully transmitted and received an ACK, the transmitted flag ++ * has been set and is visible here. ++ * ++ * We are currently not handling unsequenced packets here, as those ++ * should never expect a response as ensured in ssh_rtl_submit. If this ++ * ever changes, one would have to test for ++ * ++ * (r->state & (transmitting | transmitted)) ++ * ++ * on unsequenced packets to determine if they could have been ++ * transmitted. There are no synchronization guarantees as in the ++ * sequenced case, since, in this case, the callback function will not ++ * run on the same thread. Thus an exact determination is impossible. ++ */ ++ if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) { ++ rtl_err(rtl, "rtl: received response before ACK for request" ++ " (rqid = 0x%04x)\n", rqid); ++ ++ /* ++ * NB: Timeout has already been canceled, request already been ++ * removed from pending and marked as locked and completed. As ++ * we receive a "false" response, the packet might still be ++ * queued though. ++ */ ++ ssh_rtl_queue_remove(r); ++ ++ ssh_rtl_complete_with_status(r, -EREMOTEIO); ++ ssh_request_put(r); ++ ++ ssh_rtl_tx_schedule(rtl); ++ return; ++ } ++ ++ /* ++ * NB: Timeout has already been canceled, request already been ++ * removed from pending and marked as locked and completed. The request ++ * can also not be queued any more, as it has been marked as ++ * transmitting and later transmitted. Thus no need to remove it from ++ * anywhere. ++ */ ++ ++ ssh_rtl_complete_with_rsp(r, command, command_data); ++ ssh_request_put(r); ++ ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static bool ssh_rtl_cancel_nonpending(struct ssh_request *r) ++{ ++ unsigned long state, fixed; ++ bool remove; ++ ++ /* ++ * Handle unsubmitted request: Try to mark the packet as locked, ++ * expecting the state to be zero (i.e. unsubmitted). 
Note that, if ++ * setting the state worked, we might still be adding the packet to the ++ * queue in a currently executing submit call. In that case, however, ++ * rqst->rtl must have been set previously, as locked is checked after ++ * setting rqst->rtl. Thus only if we successfully lock this request and ++ * rqst->rtl is NULL, we have successfully removed the request. ++ * Otherwise we need to try and grab it from the queue. ++ * ++ * Note that if the CMPXCHG fails, we are guaranteed that rqst->rtl has ++ * been set and is non-NULL, as states can only be nonzero after this ++ * has been set. Also note that we need to fetch the static (type) flags ++ * to ensure that they don't cause the cmpxchg to fail. ++ */ ++ fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK; ++ state = cmpxchg(&r->state, fixed, SSH_REQUEST_SF_LOCKED_BIT); ++ if (!state && !READ_ONCE(r->rtl)) { ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return true; ++ ++ ssh_rtl_complete_with_status(r, -ECANCELED); ++ return true; ++ } ++ ++ spin_lock(&r->rtl->queue.lock); ++ ++ /* ++ * Note: 1) Requests cannot be re-submitted. 2) If a request is queued, ++ * it cannot be "transmitting"/"pending" yet. Thus, if we successfully ++ * remove the the request here, we have removed all its occurences in ++ * the system. 
++ */ ++ ++ remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state); ++ if (!remove) { ++ spin_unlock(&r->rtl->queue.lock); ++ return false; ++ } ++ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ list_del(&r->node); ++ ++ spin_unlock(&r->rtl->queue.lock); ++ ++ ssh_request_put(r); // drop reference obtained from queue ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return true; ++ ++ ssh_rtl_complete_with_status(r, -ECANCELED); ++ return true; ++} ++ ++static bool ssh_rtl_cancel_pending(struct ssh_request *r) ++{ ++ // if the packet is already locked, it's going to be removed shortly ++ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state)) ++ return true; ++ ++ /* ++ * Now that we have locked the packet, we have guaranteed that it can't ++ * be added to the system any more. If rqst->rtl is zero, the locked ++ * check in ssh_rtl_submit has not been run and any submission, ++ * currently in progress or called later, won't add the packet. Thus we ++ * can directly complete it. ++ */ ++ if (!READ_ONCE(r->rtl)) { ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return true; ++ ++ ssh_rtl_complete_with_status(r, -ECANCELED); ++ return true; ++ } ++ ++ /* ++ * Try to cancel the packet. If the packet has not been completed yet, ++ * this will subsequently (and synchronously) call the completion ++ * callback of the packet, which will complete the request. ++ */ ++ ssh_ptl_cancel(&r->packet); ++ ++ /* ++ * If the packet has been completed with success, i.e. has not been ++ * canceled by the above call, the request may not have been completed ++ * yet (may be waiting for a response). Check if we need to do this ++ * here. 
++ */ ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return true; ++ ++ ssh_rtl_queue_remove(r); ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, -ECANCELED); ++ ++ return true; ++} ++ ++static bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending) ++{ ++ struct ssh_rtl *rtl; ++ bool canceled; ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state)) ++ return true; ++ ++ trace_ssam_request_cancel(rqst); ++ ++ if (pending) ++ canceled = ssh_rtl_cancel_pending(rqst); ++ else ++ canceled = ssh_rtl_cancel_nonpending(rqst); ++ ++ // note: rqst->rtl may be NULL if request has not been submitted yet ++ rtl = READ_ONCE(rqst->rtl); ++ if (canceled && rtl) ++ ssh_rtl_tx_schedule(rtl); ++ ++ return canceled; ++} ++ ++ ++static void ssh_rtl_packet_callback(struct ssh_packet *p, int status) ++{ ++ struct ssh_request *r = to_ssh_request(p, packet); ++ ++ if (unlikely(status)) { ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return; ++ ++ /* ++ * The packet may get cancelled even though it has not been ++ * submitted yet. The request may still be queued. Check the ++ * queue and remove it if necessary. As the timeout would have ++ * been started in this function on success, there's no need to ++ * cancel it here. ++ */ ++ ssh_rtl_queue_remove(r); ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, status); ++ ++ ssh_rtl_tx_schedule(r->rtl); ++ return; ++ } ++ ++ /* ++ * Mark as transmitted, ensure that state doesn't get zero by inserting ++ * a memory barrier. 
++ */ ++ set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state); ++ ++ // if we expect a response, we just need to start the timeout ++ if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) { ++ ssh_rtl_timeout_start(r); ++ return; ++ } ++ ++ /* ++ * If we don't expect a response, lock, remove, and complete the ++ * request. Note that, at this point, the request is guaranteed to have ++ * left the queue and no timeout has been started. Thus we only need to ++ * remove it from pending. If the request has already been completed (it ++ * may have been canceled) return. ++ */ ++ ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ return; ++ ++ ssh_rtl_pending_remove(r); ++ ssh_rtl_complete_with_status(r, 0); ++ ++ ssh_rtl_tx_schedule(r->rtl); ++} ++ ++ ++static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeo) ++{ ++ ktime_t timestamp = READ_ONCE(r->timestamp); ++ ++ if (timestamp != KTIME_MAX) ++ return ktime_add(timestamp, timeo); ++ else ++ return KTIME_MAX; ++} ++ ++static void ssh_rtl_timeout_reap(struct work_struct *work) ++{ ++ struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work); ++ struct ssh_request *r, *n; ++ LIST_HEAD(claimed); ++ ktime_t now = ktime_get_coarse_boottime(); ++ ktime_t timeout = rtl->rtx_timeout.timeout; ++ ktime_t next = KTIME_MAX; ++ ++ trace_ssam_rtl_timeout_reap("pending", atomic_read(&rtl->pending.count)); ++ ++ /* ++ * Mark reaper as "not pending". This is done before checking any ++ * requests to avoid lost-update type problems. ++ */ ++ WRITE_ONCE(rtl->rtx_timeout.expires, KTIME_MAX); ++ smp_mb__after_atomic(); ++ ++ spin_lock(&rtl->pending.lock); ++ list_for_each_entry_safe(r, n, &rtl->pending.head, node) { ++ ktime_t expires = ssh_request_get_expiration(r, timeout); ++ ++ /* ++ * Check if the timeout hasn't expired yet. 
Find out next ++ * expiration date to be handled after this run. ++ */ ++ if (ktime_after(expires, now)) { ++ next = ktime_before(expires, next) ? expires : next; ++ continue; ++ } ++ ++ // avoid further transitions if locked ++ if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state)) ++ continue; ++ ++ /* ++ * We have now marked the packet as locked. Thus it cannot be ++ * added to the pending or queued lists again after we've ++ * removed it here. We can therefore re-use the node of this ++ * packet temporarily. ++ */ ++ ++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state); ++ ++ atomic_dec(&rtl->pending.count); ++ list_del(&r->node); ++ ++ list_add_tail(&r->node, &claimed); ++ } ++ spin_unlock(&rtl->pending.lock); ++ ++ // cancel and complete the request ++ list_for_each_entry_safe(r, n, &claimed, node) { ++ trace_ssam_request_timeout(r); ++ ++ /* ++ * At this point we've removed the packet from pending. This ++ * means that we've obtained the last (only) reference of the ++ * system to it. Thus we can just complete it. 
++ */ ++ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ ssh_rtl_complete_with_status(r, -ETIMEDOUT); ++ ++ // drop the reference we've obtained by removing it from pending ++ list_del(&r->node); ++ ssh_request_put(r); ++ } ++ ++ // ensure that reaper doesn't run again immediately ++ next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION)); ++ if (next != KTIME_MAX) ++ ssh_rtl_timeout_reaper_mod(rtl, now, next); ++ ++ ssh_rtl_tx_schedule(rtl); ++} ++ ++ ++static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd, ++ const struct sshp_span *data) ++{ ++ trace_ssam_rx_event_received(cmd, data->len); ++ ++ rtl_dbg(rtl, "rtl: handling event (rqid: 0x%04x)\n", ++ get_unaligned_le16(&cmd->rqid)); ++ ++ rtl->ops.handle_event(rtl, cmd, data); ++} ++ ++static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct sshp_span *data) ++{ ++ struct ssh_rtl *rtl = to_ssh_rtl(p, ptl); ++ struct device *dev = &p->serdev->dev; ++ struct ssh_command *command; ++ struct sshp_span command_data; ++ ++ if (sshp_parse_command(dev, data, &command, &command_data)) ++ return; ++ ++ if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid))) ++ ssh_rtl_rx_event(rtl, command, &command_data); ++ else ++ ssh_rtl_complete(rtl, command, &command_data); ++} ++ ++static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct sshp_span *data) ++{ ++ switch (data->ptr[0]) { ++ case SSH_PLD_TYPE_CMD: ++ ssh_rtl_rx_command(p, data); ++ break; ++ ++ default: ++ ptl_err(p, "rtl: rx: unknown frame payload type" ++ " (type: 0x%02x)\n", data->ptr[0]); ++ break; ++ } ++} ++ ++ ++static inline struct device *ssh_rtl_get_device(struct ssh_rtl *rtl) ++{ ++ return ssh_ptl_get_device(&rtl->ptl); ++} ++ ++static inline bool ssh_rtl_tx_flush(struct ssh_rtl *rtl) ++{ ++ return flush_work(&rtl->tx.work); ++} ++ ++static inline int ssh_rtl_tx_start(struct ssh_rtl *rtl) ++{ ++ int status; ++ bool sched; ++ ++ status = ssh_ptl_tx_start(&rtl->ptl); ++ if (status) ++ return 
status; ++ ++ /* ++ * If the packet layer has been shut down and restarted without shutting ++ * down the request layer, there may still be requests queued and not ++ * handled. ++ */ ++ spin_lock(&rtl->queue.lock); ++ sched = !list_empty(&rtl->queue.head); ++ spin_unlock(&rtl->queue.lock); ++ ++ if (sched) ++ ssh_rtl_tx_schedule(rtl); ++ ++ return 0; ++} ++ ++static inline int ssh_rtl_rx_start(struct ssh_rtl *rtl) ++{ ++ return ssh_ptl_rx_start(&rtl->ptl); ++} ++ ++static int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev, ++ struct ssh_rtl_ops *ops) ++{ ++ struct ssh_ptl_ops ptl_ops; ++ int status; ++ ++ ptl_ops.data_received = ssh_rtl_rx_data; ++ ++ status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops); ++ if (status) ++ return status; ++ ++ spin_lock_init(&rtl->queue.lock); ++ INIT_LIST_HEAD(&rtl->queue.head); ++ ++ spin_lock_init(&rtl->pending.lock); ++ INIT_LIST_HEAD(&rtl->pending.head); ++ atomic_set_release(&rtl->pending.count, 0); ++ ++ INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn); ++ ++ rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT; ++ rtl->rtx_timeout.expires = KTIME_MAX; ++ INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap); ++ ++ rtl->ops = *ops; ++ ++ return 0; ++} ++ ++static void ssh_rtl_destroy(struct ssh_rtl *rtl) ++{ ++ ssh_ptl_destroy(&rtl->ptl); ++} ++ ++ ++static void ssh_rtl_packet_release(struct ssh_packet *p) ++{ ++ struct ssh_request *rqst = to_ssh_request(p, packet); ++ rqst->ops->release(rqst); ++} ++ ++static const struct ssh_packet_ops ssh_rtl_packet_ops = { ++ .complete = ssh_rtl_packet_callback, ++ .release = ssh_rtl_packet_release, ++}; ++ ++static void ssh_request_init(struct ssh_request *rqst, ++ enum ssam_request_flags flags, ++ const struct ssh_request_ops *ops) ++{ ++ struct ssh_packet_args packet_args; ++ ++ packet_args.type = SSH_PACKET_TY_BLOCKING; ++ if (!(flags & SSAM_REQUEST_UNSEQUENCED)) ++ packet_args.type = SSH_PACKET_TY_SEQUENCED; ++ ++ packet_args.priority = SSH_PACKET_PRIORITY(DATA, 
0); ++ packet_args.ops = &ssh_rtl_packet_ops; ++ ++ ssh_packet_init(&rqst->packet, &packet_args); ++ ++ rqst->rtl = NULL; ++ INIT_LIST_HEAD(&rqst->node); ++ ++ rqst->state = 0; ++ if (flags & SSAM_REQUEST_HAS_RESPONSE) ++ rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT); ++ ++ rqst->timestamp = KTIME_MAX; ++ rqst->ops = ops; ++} ++ ++ ++struct ssh_flush_request { ++ struct ssh_request base; ++ struct completion completion; ++ int status; ++}; ++ ++static void ssh_rtl_flush_request_complete(struct ssh_request *r, ++ const struct ssh_command *cmd, ++ const struct sshp_span *data, ++ int status) ++{ ++ struct ssh_flush_request *rqst; ++ ++ rqst = container_of(r, struct ssh_flush_request, base); ++ rqst->status = status; ++} ++ ++static void ssh_rtl_flush_request_release(struct ssh_request *r) ++{ ++ struct ssh_flush_request *rqst; ++ ++ rqst = container_of(r, struct ssh_flush_request, base); ++ complete_all(&rqst->completion); ++} ++ ++static const struct ssh_request_ops ssh_rtl_flush_request_ops = { ++ .complete = ssh_rtl_flush_request_complete, ++ .release = ssh_rtl_flush_request_release, ++}; ++ ++/** ++ * ssh_rtl_flush - flush the request transmission layer ++ * @rtl: request transmission layer ++ * @timeout: timeout for the flush operation in jiffies ++ * ++ * Queue a special flush request and wait for its completion. This request ++ * will be completed after all other currently queued and pending requests ++ * have been completed. Instead of a normal data packet, this request submits ++ * a special flush packet, meaning that upon completion, also the underlying ++ * packet transmission layer has been flushed. ++ * ++ * Flushing the request layer gurarantees that all previously submitted ++ * requests have been fully completed before this call returns. Additinally, ++ * flushing blocks execution of all later submitted requests until the flush ++ * has been completed. 
++ * ++ * If the caller ensures that no new requests are submitted after a call to ++ * this function, the request transmission layer is guaranteed to have no ++ * remaining requests when this call returns. The same guarantee does not hold ++ * for the packet layer, on which control packets may still be queued after ++ * this call. See the documentation of ssh_ptl_flush for more details on ++ * packet layer flushing. ++ * ++ * Return: Zero on success, -ETIMEDOUT if the flush timed out and has been ++ * canceled as a result of the timeout, or -ESHUTDOWN if the packet and/or ++ * request transmission layer has been shut down before this call. May also ++ * return -EINTR if the underlying packet transmission has been interrupted. ++ */ ++static int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout) ++{ ++ const unsigned init_flags = SSAM_REQUEST_UNSEQUENCED; ++ struct ssh_flush_request rqst; ++ int status; ++ ++ ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops); ++ rqst.base.packet.type |= SSH_PACKET_TY_FLUSH; ++ rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0); ++ rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT); ++ ++ init_completion(&rqst.completion); ++ ++ status = ssh_rtl_submit(rtl, &rqst.base); ++ if (status) ++ return status; ++ ++ ssh_request_put(&rqst.base); ++ ++ if (wait_for_completion_timeout(&rqst.completion, timeout)) ++ return 0; ++ ++ ssh_rtl_cancel(&rqst.base, true); ++ wait_for_completion(&rqst.completion); ++ ++ WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED ++ && rqst.status != -ESHUTDOWN && rqst.status != -EINTR); ++ ++ return rqst.status == -ECANCELED ? 
-ETIMEDOUT : status; ++} ++ ++ ++static void ssh_rtl_shutdown(struct ssh_rtl *rtl) ++{ ++ struct ssh_request *r, *n; ++ LIST_HEAD(claimed); ++ int pending; ++ ++ set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state); ++ smp_mb__after_atomic(); ++ ++ // remove requests from queue ++ spin_lock(&rtl->queue.lock); ++ list_for_each_entry_safe(r, n, &rtl->queue.head, node) { ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state); ++ ++ list_del(&r->node); ++ list_add_tail(&r->node, &claimed); ++ } ++ spin_unlock(&rtl->queue.lock); ++ ++ /* ++ * We have now guaranteed that the queue is empty and no more new ++ * requests can be submitted (i.e. it will stay empty). This means that ++ * calling ssh_rtl_tx_schedule will not schedule tx.work any more. So we ++ * can simply call cancel_work_sync on tx.work here and when that ++ * returns, we've locked it down. This also means that after this call, ++ * we don't submit any more packets to the underlying packet layer, so ++ * we can also shut that down. ++ */ ++ ++ cancel_work_sync(&rtl->tx.work); ++ ssh_ptl_shutdown(&rtl->ptl); ++ cancel_delayed_work_sync(&rtl->rtx_timeout.reaper); ++ ++ /* ++ * Shutting down the packet layer should also have caneled all requests. ++ * Thus the pending set should be empty. Attempt to handle this ++ * gracefully anyways, even though this should be dead code. 
++ */ ++ ++ pending = atomic_read(&rtl->pending.count); ++ WARN_ON(pending); ++ ++ if (pending) { ++ spin_lock(&rtl->pending.lock); ++ list_for_each_entry_safe(r, n, &rtl->pending.head, node) { ++ set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state); ++ smp_mb__before_atomic(); ++ clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state); ++ ++ list_del(&r->node); ++ list_add_tail(&r->node, &claimed); ++ } ++ spin_unlock(&rtl->pending.lock); ++ } ++ ++ // finally cancel and complete requests ++ list_for_each_entry_safe(r, n, &claimed, node) { ++ // test_and_set because we still might compete with cancellation ++ if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) ++ ssh_rtl_complete_with_status(r, -ESHUTDOWN); ++ ++ // drop the reference we've obtained by removing it from list ++ list_del(&r->node); ++ ssh_request_put(r); ++ } ++} ++ ++ ++/* -- Event notifier/callbacks. --------------------------------------------- */ ++/* ++ * The notifier system is based on linux/notifier.h, specifically the SRCU ++ * implementation. The difference to that is, that some bits of the notifier ++ * call return value can be tracked accross multiple calls. This is done so that ++ * handling of events can be tracked and a warning can be issued in case an ++ * event goes unhandled. The idea of that waring is that it should help discover ++ * and identify new/currently unimplemented features. 
++ */ ++ ++struct ssam_nf_head { ++ struct srcu_struct srcu; ++ struct ssam_notifier_block __rcu *head; ++}; ++ ++ ++int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event) ++{ ++ struct ssam_notifier_block *nb, *next_nb; ++ int ret = 0, idx; ++ ++ idx = srcu_read_lock(&nh->srcu); ++ ++ nb = rcu_dereference_raw(nh->head); ++ while (nb) { ++ next_nb = rcu_dereference_raw(nb->next); ++ ++ ret = (ret & SSAM_NOTIF_STATE_MASK) | nb->fn(nb, event); ++ if (ret & SSAM_NOTIF_STOP) ++ break; ++ ++ nb = next_nb; ++ } ++ ++ srcu_read_unlock(&nh->srcu, idx); ++ return ret; ++} ++ ++/* ++ * Note: This function must be synchronized by the caller with respect to other ++ * insert and/or remove calls. ++ */ ++int __ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb) ++{ ++ struct ssam_notifier_block **link = &nh->head; ++ ++ while ((*link) != NULL) { ++ if (unlikely((*link) == nb)) { ++ WARN(1, "double register detected"); ++ return -EINVAL; ++ } ++ ++ if (nb->priority > (*link)->priority) ++ break; ++ ++ link = &((*link)->next); ++ } ++ ++ nb->next = *link; ++ rcu_assign_pointer(*link, nb); ++ ++ return 0; ++} ++ ++/* ++ * Note: This function must be synchronized by the caller with respect to other ++ * insert and/or remove calls. On success, the caller _must_ ensure SRCU ++ * synchronization by calling `synchronize_srcu(&nh->srcu)` after leaving the ++ * critical section, to ensure that the removed notifier block is not in use any ++ * more. 
++ */ ++int __ssam_nfblk_remove(struct ssam_nf_head *nh, struct ssam_notifier_block *nb) ++{ ++ struct ssam_notifier_block **link = &nh->head; ++ ++ while ((*link) != NULL) { ++ if ((*link) == nb) { ++ rcu_assign_pointer(*link, nb->next); ++ return 0; ++ } ++ ++ link = &((*link)->next); ++ } ++ ++ return -ENOENT; ++} ++ ++static int ssam_nf_head_init(struct ssam_nf_head *nh) ++{ ++ int status; ++ ++ status = init_srcu_struct(&nh->srcu); ++ if (status) ++ return status; ++ ++ nh->head = NULL; ++ return 0; ++} ++ ++static void ssam_nf_head_destroy(struct ssam_nf_head *nh) ++{ ++ cleanup_srcu_struct(&nh->srcu); ++} ++ ++ ++/* -- Event/notification registry. ------------------------------------------ */ ++ ++struct ssam_nf_refcount_key { ++ struct ssam_event_registry reg; ++ struct ssam_event_id id; ++}; ++ ++struct ssam_nf_refcount_entry { ++ struct rb_node node; ++ struct ssam_nf_refcount_key key; ++ int refcount; ++}; ++ ++struct ssam_nf { ++ struct mutex lock; ++ struct rb_root refcount; ++ struct ssam_nf_head head[SURFACE_SAM_SSH_NUM_EVENTS]; ++}; ++ ++ ++static int ssam_nf_refcount_inc(struct ssam_nf *nf, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id) ++{ ++ struct ssam_nf_refcount_entry *entry; ++ struct ssam_nf_refcount_key key; ++ struct rb_node **link = &nf->refcount.rb_node; ++ struct rb_node *parent; ++ int cmp; ++ ++ key.reg = reg; ++ key.id = id; ++ ++ while (*link) { ++ entry = rb_entry(*link, struct ssam_nf_refcount_entry, node); ++ parent = *link; ++ ++ cmp = memcmp(&key, &entry->key, sizeof(key)); ++ if (cmp < 0) { ++ link = &(*link)->rb_left; ++ } else if (cmp > 0) { ++ link = &(*link)->rb_right; ++ } else if (entry->refcount < INT_MAX) { ++ return ++entry->refcount; ++ } else { ++ return -ENOSPC; ++ } ++ } ++ ++ entry = kzalloc(sizeof(*entry), GFP_KERNEL); ++ if (!entry) ++ return -ENOMEM; ++ ++ entry->key = key; ++ entry->refcount = 1; ++ ++ rb_link_node(&entry->node, parent, link); ++ rb_insert_color(&entry->node, &nf->refcount); 
++ ++ return entry->refcount; ++} ++ ++static int ssam_nf_refcount_dec(struct ssam_nf *nf, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id) ++{ ++ struct ssam_nf_refcount_entry *entry; ++ struct ssam_nf_refcount_key key; ++ struct rb_node *node = nf->refcount.rb_node; ++ int cmp, rc; ++ ++ key.reg = reg; ++ key.id = id; ++ ++ while (node) { ++ entry = rb_entry(node, struct ssam_nf_refcount_entry, node); ++ ++ cmp = memcmp(&key, &entry->key, sizeof(key)); ++ if (cmp < 0) { ++ node = node->rb_left; ++ } else if (cmp > 0) { ++ node = node->rb_right; ++ } else { ++ rc = --entry->refcount; ++ ++ if (rc == 0) { ++ rb_erase(&entry->node, &nf->refcount); ++ kfree(entry); ++ } ++ ++ return rc; ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid, ++ struct ssam_event *event) ++{ ++ struct ssam_nf_head *nf_head; ++ int status, nf_ret; ++ ++ if (!ssh_rqid_is_event(rqid)) { ++ dev_warn(dev, "event: unsupported rqid: 0x%04x\n", rqid); ++ return; ++ } ++ ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ nf_ret = ssam_nfblk_call_chain(nf_head, event); ++ status = ssam_notifier_to_errno(nf_ret); ++ ++ if (status < 0) { ++ dev_err(dev, "event: error handling event: %d " ++ "(tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n", ++ status, event->target_category, event->command_id, ++ event->instance_id, event->channel); ++ } ++ ++ if (!(nf_ret & SSAM_NOTIF_HANDLED)) { ++ dev_warn(dev, "event: unhandled event (rqid: 0x%02x, " ++ "tc: 0x%02x, cid: 0x%02x, iid: 0x%02x, chn: 0x%02x)\n", ++ rqid, event->target_category, event->command_id, ++ event->instance_id, event->channel); ++ } ++} ++ ++static int ssam_nf_register(struct ssam_nf *nf, struct ssam_event_notifier *n) ++{ ++ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); ++ struct ssam_nf_head *nf_head; ++ int rc, status; ++ ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ ++ 
mutex_lock(&nf->lock); ++ ++ rc = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id); ++ if (rc < 0) { ++ mutex_lock(&nf->lock); ++ return rc; ++ } ++ ++ status = __ssam_nfblk_insert(nf_head, &n->base); ++ if (status) ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ ++ mutex_unlock(&nf->lock); ++ return status; ++} ++ ++static int ssam_nf_unregister(struct ssam_nf *nf, struct ssam_event_notifier *n) ++{ ++ u16 rqid = ssh_tc_to_rqid(n->event.id.target_category); ++ struct ssam_nf_head *nf_head; ++ int status; ++ ++ if (!ssh_rqid_is_event(rqid)) ++ return -EINVAL; ++ ++ nf_head = &nf->head[ssh_rqid_to_event(rqid)]; ++ ++ mutex_lock(&nf->lock); ++ ++ status = __ssam_nfblk_remove(nf_head, &n->base); ++ if (status) { ++ mutex_unlock(&nf->lock); ++ return status; ++ } ++ ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ ++ mutex_unlock(&nf->lock); ++ synchronize_srcu(&nf_head->srcu); ++ ++ return 0; ++} ++ ++static int ssam_nf_init(struct ssam_nf *nf) ++{ ++ int i, status; ++ ++ for (i = 0; i < SURFACE_SAM_SSH_NUM_EVENTS; i++) { ++ status = ssam_nf_head_init(&nf->head[i]); ++ if (status) ++ break; ++ } ++ ++ if (status) { ++ for (i = i - 1; i >= 0; i--) ++ ssam_nf_head_destroy(&nf->head[i]); ++ ++ return status; ++ } ++ ++ mutex_init(&nf->lock); ++ return 0; ++} ++ ++static void ssam_nf_destroy(struct ssam_nf *nf) ++{ ++ int i; ++ ++ for (i = 0; i < SURFACE_SAM_SSH_NUM_EVENTS; i++) ++ ssam_nf_head_destroy(&nf->head[i]); ++ ++ mutex_destroy(&nf->lock); ++} ++ ++ ++/* -- Event/async request completion system. 
-------------------------------- */ ++ ++#define SSAM_CPLT_WQ_NAME "ssam_cpltq" ++ ++ ++struct ssam_cplt; ++ ++struct ssam_event_item { ++ struct list_head node; ++ u16 rqid; ++ struct ssam_event event; // must be last ++}; ++ ++struct ssam_event_queue { ++ struct ssam_cplt *cplt; ++ ++ spinlock_t lock; ++ struct list_head head; ++ struct work_struct work; ++}; ++ ++struct ssam_event_channel { ++ struct ssam_event_queue queue[SURFACE_SAM_SSH_NUM_EVENTS]; ++}; ++ ++struct ssam_cplt { ++ struct device *dev; ++ struct workqueue_struct *wq; ++ ++ struct { ++ struct ssam_event_channel channel[SURFACE_SAM_SSH_NUM_CHANNELS]; ++ struct ssam_nf notif; ++ } event; ++}; ++ ++ ++static void ssam_event_queue_push(struct ssam_event_queue *q, ++ struct ssam_event_item *item) ++{ ++ spin_lock(&q->lock); ++ list_add_tail(&item->node, &q->head); ++ spin_unlock(&q->lock); ++} ++ ++static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q) ++{ ++ struct ssam_event_item *item; ++ ++ spin_lock(&q->lock); ++ item = list_first_entry_or_null(&q->head, struct ssam_event_item, node); ++ if (item) ++ list_del(&item->node); ++ spin_unlock(&q->lock); ++ ++ return item; ++} ++ ++static bool ssam_event_queue_is_empty(struct ssam_event_queue *q) ++{ ++ bool empty; ++ ++ spin_lock(&q->lock); ++ empty = list_empty(&q->head); ++ spin_unlock(&q->lock); ++ ++ return empty; ++} ++ ++static struct ssam_event_queue *ssam_cplt_get_event_queue( ++ struct ssam_cplt *cplt, u8 channel, u16 rqid) ++{ ++ u16 event = ssh_rqid_to_event(rqid); ++ u16 chidx = ssh_channel_to_index(channel); ++ ++ if (!ssh_rqid_is_event(rqid)) { ++ dev_err(cplt->dev, "event: unsupported rqid: 0x%04x\n", rqid); ++ return NULL; ++ } ++ ++ if (!ssh_channel_is_valid(channel)) { ++ dev_warn(cplt->dev, "event: unsupported channel: %u\n", ++ channel); ++ chidx = 0; ++ } ++ ++ return &cplt->event.channel[chidx].queue[event]; ++} ++ ++static inline bool ssam_cplt_submit(struct ssam_cplt *cplt, ++ struct work_struct *work) 
++{ ++ return queue_work(cplt->wq, work); ++} ++ ++static int ssam_cplt_submit_event(struct ssam_cplt *cplt, ++ struct ssam_event_item *item) ++{ ++ struct ssam_event_queue *evq; ++ ++ evq = ssam_cplt_get_event_queue(cplt, item->event.channel, item->rqid); ++ if (!evq) ++ return -EINVAL; ++ ++ ssam_event_queue_push(evq, item); ++ ssam_cplt_submit(cplt, &evq->work); ++ return 0; ++} ++ ++static void ssam_cplt_flush(struct ssam_cplt *cplt) ++{ ++ flush_workqueue(cplt->wq); ++} ++ ++static void ssam_event_queue_work_fn(struct work_struct *work) ++{ ++ struct ssam_event_queue *queue; ++ struct ssam_event_item *item; ++ struct ssam_nf *nf; ++ struct device *dev; ++ int i; ++ ++ queue = container_of(work, struct ssam_event_queue, work); ++ nf = &queue->cplt->event.notif; ++ dev = queue->cplt->dev; ++ ++ for (i = 0; i < 10; i++) { ++ item = ssam_event_queue_pop(queue); ++ if (item == NULL) ++ return; ++ ++ ssam_nf_call(nf, dev, item->rqid, &item->event); ++ kfree(item); ++ } ++ ++ if (!ssam_event_queue_is_empty(queue)) ++ ssam_cplt_submit(queue->cplt, &queue->work); ++} ++ ++static void ssam_event_queue_init(struct ssam_cplt *cplt, ++ struct ssam_event_queue *evq) ++{ ++ evq->cplt = cplt; ++ spin_lock_init(&evq->lock); ++ INIT_LIST_HEAD(&evq->head); ++ INIT_WORK(&evq->work, ssam_event_queue_work_fn); ++} ++ ++static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev) ++{ ++ struct ssam_event_channel *channel; ++ int status, c, i; ++ ++ cplt->dev = dev; ++ ++ cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); ++ if (!cplt->wq) ++ return -ENOMEM; ++ ++ for (c = 0; c < ARRAY_SIZE(cplt->event.channel); c++) { ++ channel = &cplt->event.channel[c]; ++ ++ for (i = 0; i < ARRAY_SIZE(channel->queue); i++) ++ ssam_event_queue_init(cplt, &channel->queue[i]); ++ } ++ ++ status = ssam_nf_init(&cplt->event.notif); ++ if (status) ++ destroy_workqueue(cplt->wq); ++ ++ return status; ++} ++ ++static void ssam_cplt_destroy(struct ssam_cplt *cplt) ++{ ++ destroy_workqueue(cplt->wq); 
++ ssam_nf_destroy(&cplt->event.notif); ++} ++ ++ ++/* -- Top-Level Request Interface ------------------------------------------- */ ++ ++struct ssam_response { ++ int status; ++ u16 capacity; ++ u16 length; ++ u8 *pointer; ++}; ++ ++struct ssam_request_sync { ++ struct ssh_request base; ++ struct completion comp; ++ struct ssam_response resp; ++}; ++ ++ ++static void ssam_request_sync_complete(struct ssh_request *rqst, ++ const struct ssh_command *cmd, ++ const struct sshp_span *data, int status) ++{ ++ struct ssam_request_sync *r; ++ struct ssh_rtl *rtl = READ_ONCE(rqst->rtl); ++ ++ r = container_of(rqst, struct ssam_request_sync, base); ++ r->resp.status = status; ++ r->resp.length = 0; ++ ++ if (status) { ++ rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status); ++ return; ++ } ++ ++ if (!data) // handle requests without a response ++ return; ++ ++ if (!r->resp.pointer && data->len) { ++ rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n"); ++ return; ++ } ++ ++ if (data->len > r->resp.capacity) { ++ rtl_err(rtl, "rsp: response buffer too small," ++ " capacity: %u bytes, got: %zu bytes\n", ++ r->resp.capacity, data->len); ++ status = -ENOSPC; ++ return; ++ } ++ ++ r->resp.length = data->len; ++ memcpy(r->resp.pointer, data->ptr, data->len); ++} ++ ++static void ssam_request_sync_release(struct ssh_request *rqst) ++{ ++ complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp); ++} ++ ++static const struct ssh_request_ops ssam_request_sync_ops = { ++ .release = ssam_request_sync_release, ++ .complete = ssam_request_sync_complete, ++}; ++ ++static void ssam_request_sync_wait_complete(struct ssam_request_sync *rqst) ++{ ++ wait_for_completion(&rqst->comp); ++} ++ ++ ++/* -- TODO ------------------------------------------------------------------ */ + +enum ssh_ec_state { + SSH_EC_UNINITIALIZED, @@ -5398,126 +9454,47 @@ index 000000000000..988be7c2d286 + SSH_EC_SUSPENDED, +}; + -+struct ssh_counters { -+ u8 seq; // control sequence id -+ 
u16 rqid; // id for request/response matching -+}; -+ -+struct ssh_writer { -+ u8 *data; -+ u8 *ptr; -+} __packed; -+ -+enum ssh_receiver_state { -+ SSH_RCV_DISCARD, -+ SSH_RCV_CONTROL, -+ SSH_RCV_COMMAND, -+}; -+ -+struct ssh_receiver { -+ spinlock_t lock; -+ enum ssh_receiver_state state; -+ struct completion signal; -+ struct kfifo fifo; -+ struct { -+ bool pld; -+ u8 seq; -+ u16 rqid; -+ } expect; -+ struct { -+ u16 cap; -+ u16 len; -+ u8 *ptr; -+ } eval_buf; -+}; -+ -+struct ssh_event_handler { -+ surface_sam_ssh_event_handler_fn handler; -+ surface_sam_ssh_event_handler_delay delay; -+ void *data; -+}; -+ -+struct ssh_events { -+ spinlock_t lock; -+ struct workqueue_struct *queue_ack; -+ struct workqueue_struct *queue_evt; -+ struct ssh_event_handler handler[SAM_NUM_EVENT_TYPES]; -+}; -+ +struct sam_ssh_ec { -+ struct mutex lock; -+ enum ssh_ec_state state; + struct serdev_device *serdev; -+ struct ssh_counters counter; -+ struct ssh_writer writer; -+ struct ssh_receiver receiver; -+ struct ssh_events events; ++ ++ struct ssh_rtl rtl; ++ struct ssam_cplt cplt; ++ ++ struct { ++ struct ssh_seq_counter seq; ++ struct ssh_rqid_counter rqid; ++ } counter; ++ ++ enum ssh_ec_state state; ++ + int irq; + bool irq_wakeup_enabled; +}; + -+struct ssh_fifo_packet { -+ u8 type; // packet type (ACK/RETRY/CMD) -+ u8 seq; -+ u8 len; -+}; -+ -+struct ssh_event_work { -+ refcount_t refcount; -+ struct sam_ssh_ec *ec; -+ struct work_struct work_ack; -+ struct delayed_work work_evt; -+ struct surface_sam_ssh_event event; -+ u8 seq; -+}; -+ -+ +static struct sam_ssh_ec ssh_ec = { -+ .lock = __MUTEX_INITIALIZER(ssh_ec.lock), + .state = SSH_EC_UNINITIALIZED, + .serdev = NULL, -+ .counter = { -+ .seq = 0, -+ .rqid = 0, -+ }, -+ .writer = { -+ .data = NULL, -+ .ptr = NULL, -+ }, -+ .receiver = { -+ .lock = __SPIN_LOCK_UNLOCKED(), -+ .state = SSH_RCV_DISCARD, -+ .expect = {}, -+ }, -+ .events = { -+ .lock = __SPIN_LOCK_UNLOCKED(), -+ .handler = {}, -+ }, -+ .irq = -1, +}; + + ++/* -- 
TODO ------------------------------------------------------------------ */ ++ ++#define ssh_dbg(ec, fmt, ...) dev_dbg(&(ec)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ssh_warn(ec, fmt, ...) dev_warn(&(ec)->serdev->dev, fmt, ##__VA_ARGS__) ++#define ssh_err(ec, fmt, ...) dev_err(&(ec)->serdev->dev, fmt, ##__VA_ARGS__) ++ ++ +static inline struct sam_ssh_ec *surface_sam_ssh_acquire(void) +{ -+ struct sam_ssh_ec *ec = &ssh_ec; -+ -+ mutex_lock(&ec->lock); -+ return ec; -+} -+ -+static inline void surface_sam_ssh_release(struct sam_ssh_ec *ec) -+{ -+ mutex_unlock(&ec->lock); ++ return &ssh_ec; +} + +static inline struct sam_ssh_ec *surface_sam_ssh_acquire_init(void) +{ + struct sam_ssh_ec *ec = surface_sam_ssh_acquire(); + -+ if (ec->state == SSH_EC_UNINITIALIZED) { -+ surface_sam_ssh_release(ec); ++ if (smp_load_acquire(&ec->state) == SSH_EC_UNINITIALIZED) + return NULL; -+ } + + return ec; +} @@ -5536,427 +9513,276 @@ index 000000000000..988be7c2d286 + if (!link) + return -EFAULT; + -+ surface_sam_ssh_release(ec); + return 0; +} +EXPORT_SYMBOL_GPL(surface_sam_ssh_consumer_register); + + -+static inline u16 sam_rqid_to_rqst(u16 rqid) -+{ -+ return rqid << SURFACE_SAM_SSH_RQID_EVENT_BITS; -+} ++static int __surface_sam_ssh_rqst(struct sam_ssh_ec *ec, ++ const struct surface_sam_ssh_rqst *rqst, ++ struct surface_sam_ssh_buf *result); + -+static inline bool sam_rqid_is_event(u16 rqid) ++static int surface_sam_ssh_event_enable(struct sam_ssh_ec *ec, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id, ++ u8 flags) +{ -+ const u16 mask = (1 << SURFACE_SAM_SSH_RQID_EVENT_BITS) - 1; ++ struct ssh_notification_params params; ++ struct surface_sam_ssh_rqst rqst; ++ struct surface_sam_ssh_buf result; + -+ return rqid != 0 && (rqid | mask) == mask; -+} -+ -+int surface_sam_ssh_enable_event_source(u8 tc, u8 unknown, u16 rqid) -+{ -+ u8 pld[4] = { tc, unknown, rqid & 0xff, rqid >> 8 }; ++ u16 rqid = ssh_tc_to_rqid(id.target_category); + u8 buf[1] = { 0x00 }; -+ -+ 
struct surface_sam_ssh_rqst rqst = { -+ .tc = 0x01, -+ .cid = 0x0b, -+ .iid = 0x00, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, -+ .snc = 0x01, -+ .cdl = 0x04, -+ .pld = pld, -+ }; -+ -+ struct surface_sam_ssh_buf result = { -+ result.cap = ARRAY_SIZE(buf), -+ result.len = 0, -+ result.data = buf, -+ }; -+ + int status; + + // only allow RQIDs that lie within event spectrum -+ if (!sam_rqid_is_event(rqid)) ++ if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + -+ status = surface_sam_ssh_rqst(&rqst, &result); ++ params.target_category = id.target_category; ++ params.instance_id = id.instance; ++ params.flags = flags; ++ put_unaligned_le16(rqid, ¶ms.request_id); ++ ++ rqst.tc = reg.target_category; ++ rqst.cid = reg.cid_enable; ++ rqst.iid = 0x00; ++ rqst.chn = reg.channel; ++ rqst.snc = 0x01; ++ rqst.cdl = sizeof(params); ++ rqst.pld = (u8 *)¶ms; ++ ++ result.cap = ARRAY_SIZE(buf); ++ result.len = 0; ++ result.data = buf; ++ ++ status = __surface_sam_ssh_rqst(ec, &rqst, &result); ++ ++ if (status) { ++ dev_err(&ec->serdev->dev, "failed to enable event source" ++ " (tc: 0x%02x, rqid: 0x%04x)\n", ++ id.target_category, rqid); ++ } + + if (buf[0] != 0x00) { + pr_warn(SSH_RQST_TAG_FULL -+ "unexpected result while enabling event source: 0x%02x\n", -+ buf[0]); ++ "unexpected result while enabling event source: " ++ "0x%02x\n", buf[0]); + } + + return status; + +} -+EXPORT_SYMBOL_GPL(surface_sam_ssh_enable_event_source); + -+int surface_sam_ssh_disable_event_source(u8 tc, u8 unknown, u16 rqid) ++static int surface_sam_ssh_event_disable(struct sam_ssh_ec *ec, ++ struct ssam_event_registry reg, ++ struct ssam_event_id id, ++ u8 flags) +{ -+ u8 pld[4] = { tc, unknown, rqid & 0xff, rqid >> 8 }; ++ struct ssh_notification_params params; ++ struct surface_sam_ssh_rqst rqst; ++ struct surface_sam_ssh_buf result; ++ ++ u16 rqid = ssh_tc_to_rqid(id.target_category); + u8 buf[1] = { 0x00 }; -+ -+ struct surface_sam_ssh_rqst rqst = { -+ .tc = 0x01, -+ .cid = 0x0c, -+ .iid = 0x00, -+ .pri = 
SURFACE_SAM_PRIORITY_NORMAL, -+ .snc = 0x01, -+ .cdl = 0x04, -+ .pld = pld, -+ }; -+ -+ struct surface_sam_ssh_buf result = { -+ result.cap = ARRAY_SIZE(buf), -+ result.len = 0, -+ result.data = buf, -+ }; -+ + int status; + + // only allow RQIDs that lie within event spectrum -+ if (!sam_rqid_is_event(rqid)) ++ if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + -+ status = surface_sam_ssh_rqst(&rqst, &result); ++ params.target_category = id.target_category; ++ params.instance_id = id.instance; ++ params.flags = flags; ++ put_unaligned_le16(rqid, ¶ms.request_id); ++ ++ rqst.tc = reg.target_category; ++ rqst.cid = reg.cid_disable; ++ rqst.iid = 0x00; ++ rqst.chn = reg.channel; ++ rqst.snc = 0x01; ++ rqst.cdl = sizeof(params); ++ rqst.pld = (u8 *)¶ms; ++ ++ result.cap = ARRAY_SIZE(buf); ++ result.len = 0; ++ result.data = buf; ++ ++ status = __surface_sam_ssh_rqst(ec, &rqst, &result); ++ ++ if (status) { ++ dev_err(&ec->serdev->dev, "failed to disable event source" ++ " (tc: 0x%02x, rqid: 0x%04x)\n", ++ id.target_category, rqid); ++ } + + if (buf[0] != 0x00) { -+ pr_warn(SSH_RQST_TAG_FULL -+ "unexpected result while disabling event source: 0x%02x\n", -+ buf[0]); ++ dev_warn(&ec->serdev->dev, ++ "unexpected result while disabling event source: " ++ "0x%02x\n", buf[0]); + } + + return status; +} -+EXPORT_SYMBOL_GPL(surface_sam_ssh_disable_event_source); + -+static unsigned long sam_event_default_delay(struct surface_sam_ssh_event *event, void *data) -+{ -+ return event->pri == SURFACE_SAM_PRIORITY_HIGH ? 
SURFACE_SAM_SSH_EVENT_IMMEDIATE : 0; -+} + -+int surface_sam_ssh_set_delayed_event_handler( -+ u16 rqid, surface_sam_ssh_event_handler_fn fn, -+ surface_sam_ssh_event_handler_delay delay, -+ void *data) ++int surface_sam_ssh_notifier_register(struct ssam_event_notifier *n) +{ ++ struct ssam_nf_head *nf_head; + struct sam_ssh_ec *ec; -+ unsigned long flags; ++ struct ssam_nf *nf; ++ u16 event = ssh_tc_to_event(n->event.id.target_category); ++ u16 rqid = ssh_event_to_rqid(event); ++ int rc, status; + -+ if (!sam_rqid_is_event(rqid)) ++ if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + ec = surface_sam_ssh_acquire_init(); + if (!ec) + return -ENXIO; + -+ if (!delay) -+ delay = sam_event_default_delay; ++ nf = &ec->cplt.event.notif; ++ nf_head = &nf->head[event]; + -+ spin_lock_irqsave(&ec->events.lock, flags); -+ // check if we already have a handler -+ if (ec->events.handler[rqid - 1].handler) { -+ spin_unlock_irqrestore(&ec->events.lock, flags); -+ return -EINVAL; ++ mutex_lock(&nf->lock); ++ ++ rc = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id); ++ if (rc < 0) { ++ mutex_unlock(&nf->lock); ++ return rc; + } + -+ // 0 is not a valid event RQID -+ ec->events.handler[rqid - 1].handler = fn; -+ ec->events.handler[rqid - 1].delay = delay; -+ ec->events.handler[rqid - 1].data = data; ++ ssh_dbg(ec, "enabling event (tc: 0x%02x, rc: %d)\n", rqid, rc); + -+ spin_unlock_irqrestore(&ec->events.lock, flags); -+ surface_sam_ssh_release(ec); ++ status = __ssam_nfblk_insert(nf_head, &n->base); ++ if (status) { ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ mutex_unlock(&nf->lock); ++ return status; ++ } + ++ if (rc == 1) { ++ status = surface_sam_ssh_event_enable(ec, n->event.reg, n->event.id, n->event.flags); ++ if (status) { ++ __ssam_nfblk_remove(nf_head, &n->base); ++ ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ mutex_unlock(&nf->lock); ++ return status; ++ } ++ } ++ ++ mutex_unlock(&nf->lock); + return 0; +} 
-+EXPORT_SYMBOL_GPL(surface_sam_ssh_set_delayed_event_handler); ++EXPORT_SYMBOL_GPL(surface_sam_ssh_notifier_register); + -+int surface_sam_ssh_remove_event_handler(u16 rqid) ++int surface_sam_ssh_notifier_unregister(struct ssam_event_notifier *n) +{ ++ struct ssam_nf_head *nf_head; + struct sam_ssh_ec *ec; -+ unsigned long flags; ++ struct ssam_nf *nf; ++ u16 event = ssh_tc_to_event(n->event.id.target_category); ++ u16 rqid = ssh_event_to_rqid(event); ++ int rc, status = 0; + -+ if (!sam_rqid_is_event(rqid)) ++ if (!ssh_rqid_is_event(rqid)) + return -EINVAL; + + ec = surface_sam_ssh_acquire_init(); + if (!ec) + return -ENXIO; + -+ spin_lock_irqsave(&ec->events.lock, flags); ++ nf = &ec->cplt.event.notif; ++ nf_head = &nf->head[event]; + -+ // 0 is not a valid event RQID -+ ec->events.handler[rqid - 1].handler = NULL; -+ ec->events.handler[rqid - 1].delay = NULL; -+ ec->events.handler[rqid - 1].data = NULL; ++ mutex_lock(&nf->lock); + -+ spin_unlock_irqrestore(&ec->events.lock, flags); -+ surface_sam_ssh_release(ec); ++ rc = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id); ++ if (rc < 0) { ++ mutex_unlock(&nf->lock); ++ return rc; ++ } + -+ /* -+ * Make sure that the handler is not in use any more after we've -+ * removed it. 
-+ */ -+ flush_workqueue(ec->events.queue_evt); ++ ssh_dbg(ec, "disabling event (tc: 0x%02x, rc: %d)\n", rqid, rc); + -+ return 0; ++ if (rc == 0) ++ status = surface_sam_ssh_event_disable(ec, n->event.reg, n->event.id, n->event.flags); ++ ++ __ssam_nfblk_remove(nf_head, &n->base); ++ mutex_unlock(&nf->lock); ++ synchronize_srcu(&nf_head->srcu); ++ ++ return status; +} -+EXPORT_SYMBOL_GPL(surface_sam_ssh_remove_event_handler); ++EXPORT_SYMBOL_GPL(surface_sam_ssh_notifier_unregister); + + -+static inline u16 ssh_crc(const u8 *buf, size_t size) ++static int __surface_sam_ssh_rqst(struct sam_ssh_ec *ec, ++ const struct surface_sam_ssh_rqst *rqst, ++ struct surface_sam_ssh_buf *result) +{ -+ return crc_ccitt_false(0xffff, buf, size); -+} -+ -+static inline void ssh_write_u16(struct ssh_writer *writer, u16 in) -+{ -+ put_unaligned_le16(in, writer->ptr); -+ writer->ptr += 2; -+} -+ -+static inline void ssh_write_crc(struct ssh_writer *writer, -+ const u8 *buf, size_t size) -+{ -+ ssh_write_u16(writer, ssh_crc(buf, size)); -+} -+ -+static inline void ssh_write_syn(struct ssh_writer *writer) -+{ -+ u8 *w = writer->ptr; -+ -+ *w++ = 0xaa; -+ *w++ = 0x55; -+ -+ writer->ptr = w; -+} -+ -+static inline void ssh_write_ter(struct ssh_writer *writer) -+{ -+ u8 *w = writer->ptr; -+ -+ *w++ = 0xff; -+ *w++ = 0xff; -+ -+ writer->ptr = w; -+} -+ -+static inline void ssh_write_buf(struct ssh_writer *writer, -+ u8 *in, size_t len) -+{ -+ writer->ptr = memcpy(writer->ptr, in, len) + len; -+} -+ -+static inline void ssh_write_hdr(struct ssh_writer *writer, -+ const struct surface_sam_ssh_rqst *rqst, -+ struct sam_ssh_ec *ec) -+{ -+ struct ssh_frame_ctrl *hdr = (struct ssh_frame_ctrl *)writer->ptr; -+ u8 *begin = writer->ptr; -+ -+ hdr->type = SSH_FRAME_TYPE_CMD; -+ hdr->len = SSH_BYTELEN_CMDFRAME + rqst->cdl; // without CRC -+ hdr->pad = 0x00; -+ hdr->seq = ec->counter.seq; -+ -+ writer->ptr += sizeof(*hdr); -+ -+ ssh_write_crc(writer, begin, writer->ptr - begin); -+} -+ -+static inline 
void ssh_write_cmd(struct ssh_writer *writer, -+ const struct surface_sam_ssh_rqst *rqst, -+ struct sam_ssh_ec *ec) -+{ -+ struct ssh_frame_cmd *cmd = (struct ssh_frame_cmd *)writer->ptr; -+ u8 *begin = writer->ptr; -+ -+ u16 rqid = sam_rqid_to_rqst(ec->counter.rqid); -+ u8 rqid_lo = rqid & 0xFF; -+ u8 rqid_hi = rqid >> 8; -+ -+ cmd->type = SSH_FRAME_TYPE_CMD; -+ cmd->tc = rqst->tc; -+ cmd->pri_out = rqst->pri; -+ cmd->pri_in = 0x00; -+ cmd->iid = rqst->iid; -+ cmd->rqid_lo = rqid_lo; -+ cmd->rqid_hi = rqid_hi; -+ cmd->cid = rqst->cid; -+ -+ writer->ptr += sizeof(*cmd); -+ -+ ssh_write_buf(writer, rqst->pld, rqst->cdl); -+ ssh_write_crc(writer, begin, writer->ptr - begin); -+} -+ -+static inline void ssh_write_ack(struct ssh_writer *writer, u8 seq) -+{ -+ struct ssh_frame_ctrl *ack = (struct ssh_frame_ctrl *)writer->ptr; -+ u8 *begin = writer->ptr; -+ -+ ack->type = SSH_FRAME_TYPE_ACK; -+ ack->len = 0x00; -+ ack->pad = 0x00; -+ ack->seq = seq; -+ -+ writer->ptr += sizeof(*ack); -+ -+ ssh_write_crc(writer, begin, writer->ptr - begin); -+} -+ -+static inline void ssh_writer_reset(struct ssh_writer *writer) -+{ -+ writer->ptr = writer->data; -+} -+ -+static inline int ssh_writer_flush(struct sam_ssh_ec *ec) -+{ -+ struct ssh_writer *writer = &ec->writer; -+ struct serdev_device *serdev = ec->serdev; ++ struct ssam_request_sync actual; ++ struct msgbuf msgb; ++ size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(rqst->cdl); ++ unsigned flags = 0; ++ u16 rqid; ++ u8 seq; + int status; + -+ size_t len = writer->ptr - writer->data; -+ -+ dev_dbg(&ec->serdev->dev, "sending message\n"); -+ print_hex_dump_debug("send: ", DUMP_PREFIX_OFFSET, 16, 1, -+ writer->data, writer->ptr - writer->data, false); -+ -+ status = serdev_device_write(serdev, writer->data, len, SSH_WRITE_TIMEOUT); -+ return status >= 0 ? 
0 : status; -+} -+ -+static inline void ssh_write_msg_cmd(struct sam_ssh_ec *ec, -+ const struct surface_sam_ssh_rqst *rqst) -+{ -+ ssh_writer_reset(&ec->writer); -+ ssh_write_syn(&ec->writer); -+ ssh_write_hdr(&ec->writer, rqst, ec); -+ ssh_write_cmd(&ec->writer, rqst, ec); -+} -+ -+static inline void ssh_write_msg_ack(struct sam_ssh_ec *ec, u8 seq) -+{ -+ ssh_writer_reset(&ec->writer); -+ ssh_write_syn(&ec->writer); -+ ssh_write_ack(&ec->writer, seq); -+ ssh_write_ter(&ec->writer); -+} -+ -+static inline void ssh_receiver_restart(struct sam_ssh_ec *ec, -+ const struct surface_sam_ssh_rqst *rqst) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ec->receiver.lock, flags); -+ reinit_completion(&ec->receiver.signal); -+ ec->receiver.state = SSH_RCV_CONTROL; -+ ec->receiver.expect.pld = rqst->snc; -+ ec->receiver.expect.seq = ec->counter.seq; -+ ec->receiver.expect.rqid = sam_rqid_to_rqst(ec->counter.rqid); -+ ec->receiver.eval_buf.len = 0; -+ spin_unlock_irqrestore(&ec->receiver.lock, flags); -+} -+ -+static inline void ssh_receiver_discard(struct sam_ssh_ec *ec) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&ec->receiver.lock, flags); -+ ec->receiver.state = SSH_RCV_DISCARD; -+ ec->receiver.eval_buf.len = 0; -+ kfifo_reset(&ec->receiver.fifo); -+ spin_unlock_irqrestore(&ec->receiver.lock, flags); -+} -+ -+static int surface_sam_ssh_rqst_unlocked(struct sam_ssh_ec *ec, -+ const struct surface_sam_ssh_rqst *rqst, -+ struct surface_sam_ssh_buf *result) -+{ -+ struct device *dev = &ec->serdev->dev; -+ struct ssh_fifo_packet packet = {}; -+ int status; -+ int try; -+ unsigned int rem; -+ -+ if (rqst->cdl > SURFACE_SAM_SSH_MAX_RQST_PAYLOAD) { -+ dev_err(dev, SSH_RQST_TAG "request payload too large\n"); ++ // prevent overflow ++ if (rqst->cdl > SSH_COMMAND_MAX_PAYLOAD_SIZE) { ++ ssh_err(ec, SSH_RQST_TAG "request payload too large\n"); + return -EINVAL; + } + -+ // write command in buffer, we may need it multiple times -+ ssh_write_msg_cmd(ec, rqst); -+ 
ssh_receiver_restart(ec, rqst); ++ if (result && result->data && rqst->snc) ++ flags |= SSAM_REQUEST_HAS_RESPONSE; + -+ // send command, try to get an ack response -+ for (try = 0; try < SSH_NUM_RETRY; try++) { -+ status = ssh_writer_flush(ec); -+ if (status) -+ goto out; ++ ssh_request_init(&actual.base, flags, &ssam_request_sync_ops); ++ init_completion(&actual.comp); + -+ rem = wait_for_completion_timeout(&ec->receiver.signal, SSH_READ_TIMEOUT); -+ if (rem) { -+ // completion assures valid packet, thus ignore returned length -+ (void) !kfifo_out(&ec->receiver.fifo, &packet, sizeof(packet)); ++ actual.resp.pointer = NULL; ++ actual.resp.capacity = 0; ++ actual.resp.length = 0; ++ actual.resp.status = 0; + -+ if (packet.type == SSH_FRAME_TYPE_ACK) -+ break; -+ } ++ if (result) { ++ actual.resp.pointer = result->data; ++ actual.resp.capacity = result->cap; + } + -+ // check if we ran out of tries? -+ if (try >= SSH_NUM_RETRY) { -+ dev_err(dev, SSH_RQST_TAG "communication failed %d times, giving up\n", try); -+ status = -EIO; -+ goto out; ++ // alloc and create message ++ status = msgb_alloc(&msgb, msglen, GFP_KERNEL); ++ if (status) ++ return status; ++ ++ seq = ssh_seq_next(&ec->counter.seq); ++ rqid = ssh_rqid_next(&ec->counter.rqid); ++ msgb_push_cmd(&msgb, seq, rqst, rqid); ++ ++ actual.base.packet.data = msgb.buffer; ++ actual.base.packet.data_length = msgb.ptr - msgb.buffer; ++ ++ status = ssh_rtl_submit(&ec->rtl, &actual.base); ++ if (status) { ++ msgb_free(&msgb); ++ return status; + } + -+ ec->counter.seq += 1; -+ ec->counter.rqid += 1; ++ ssh_request_put(&actual.base); ++ ssam_request_sync_wait_complete(&actual); ++ msgb_free(&msgb); + -+ // get command response/payload -+ if (rqst->snc && result) { -+ rem = wait_for_completion_timeout(&ec->receiver.signal, SSH_READ_TIMEOUT); -+ if (rem) { -+ // completion assures valid packet, thus ignore returned length -+ (void) !kfifo_out(&ec->receiver.fifo, &packet, sizeof(packet)); ++ if (result) ++ result->len = 
actual.resp.length; + -+ if (result->cap < packet.len) { -+ status = -EINVAL; -+ goto out; -+ } -+ -+ // completion assures valid packet, thus ignore returned length -+ (void) !kfifo_out(&ec->receiver.fifo, result->data, packet.len); -+ result->len = packet.len; -+ } else { -+ dev_err(dev, SSH_RQST_TAG "communication timed out\n"); -+ status = -EIO; -+ goto out; -+ } -+ -+ // send ACK -+ if (packet.type == SSH_FRAME_TYPE_CMD) { -+ ssh_write_msg_ack(ec, packet.seq); -+ status = ssh_writer_flush(ec); -+ if (status) -+ goto out; -+ } -+ } -+ -+out: -+ ssh_receiver_discard(ec); -+ return status; ++ return actual.resp.status; +} + +int surface_sam_ssh_rqst(const struct surface_sam_ssh_rqst *rqst, struct surface_sam_ssh_buf *result) +{ + struct sam_ssh_ec *ec; -+ int status; + + ec = surface_sam_ssh_acquire_init(); + if (!ec) { @@ -5964,30 +9790,38 @@ index 000000000000..988be7c2d286 + return -ENXIO; + } + -+ if (ec->state == SSH_EC_SUSPENDED) { -+ dev_warn(&ec->serdev->dev, SSH_RQST_TAG "embedded controller is suspended\n"); -+ -+ surface_sam_ssh_release(ec); ++ if (smp_load_acquire(&ec->state) == SSH_EC_SUSPENDED) { ++ ssh_warn(ec, SSH_RQST_TAG "embedded controller is suspended\n"); + return -EPERM; + } + -+ status = surface_sam_ssh_rqst_unlocked(ec, rqst, result); -+ -+ surface_sam_ssh_release(ec); -+ return status; ++ return __surface_sam_ssh_rqst(ec, rqst, result); +} +EXPORT_SYMBOL_GPL(surface_sam_ssh_rqst); + + ++/** ++ * surface_sam_ssh_ec_resume - Resume the EC if it is in a suspended mode. ++ * @ec: the EC to resume ++ * ++ * Moves the EC from a suspended state to a normal state. See the ++ * `surface_sam_ssh_ec_suspend` function what the specific differences of ++ * these states are. Multiple repeated calls to this function seem to be ++ * handled fine by the EC, after the first call, the state will remain ++ * "normal". ++ * ++ * Must be called with the EC initialized and its lock held. 
++ */ +static int surface_sam_ssh_ec_resume(struct sam_ssh_ec *ec) +{ + u8 buf[1] = { 0x00 }; ++ int status; + + struct surface_sam_ssh_rqst rqst = { + .tc = 0x01, + .cid = 0x16, + .iid = 0x00, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, ++ .chn = 0x01, + .snc = 0x01, + .cdl = 0x00, + .pld = NULL, @@ -5999,30 +9833,49 @@ index 000000000000..988be7c2d286 + result.data = buf, + }; + -+ int status; -+ -+ status = surface_sam_ssh_rqst_unlocked(ec, &rqst, &result); ++ ssh_dbg(ec, "pm: resuming system aggregator module\n"); ++ status = __surface_sam_ssh_rqst(ec, &rqst, &result); + if (status) + return status; + ++ /* ++ * The purpose of the return value of this request is unknown. Based on ++ * logging and experience, we expect it to be zero. No other value has ++ * been observed so far. ++ */ + if (buf[0] != 0x00) { -+ dev_warn(&ec->serdev->dev, -+ "unexpected result while trying to resume EC: 0x%02x\n", -+ buf[0]); ++ ssh_warn(ec, "unexpected result while trying to resume EC: " ++ "0x%02x\n", buf[0]); + } + + return 0; +} + ++/** ++ * surface_sam_ssh_ec_suspend - Put the EC in a suspended mode: ++ * @ec: the EC to suspend ++ * ++ * Tells the EC to enter a suspended mode. In this mode, events are quiesced ++ * and the wake IRQ is armed (note that the wake IRQ does not fire if the EC ++ * has not been suspended via this request). On some devices, the keyboard ++ * backlight is turned off. Apart from this, the EC seems to continue to work ++ * as normal, meaning requests sent to it are acknowledged and seem to be ++ * correctly handled, including potential responses. Multiple repeated calls ++ * to this function seem to be handled fine by the EC, after the first call, ++ * the state will remain "suspended". ++ * ++ * Must be called with the EC initialized and its lock held. 
++ */ +static int surface_sam_ssh_ec_suspend(struct sam_ssh_ec *ec) +{ + u8 buf[1] = { 0x00 }; ++ int status; + + struct surface_sam_ssh_rqst rqst = { + .tc = 0x01, + .cid = 0x15, + .iid = 0x00, -+ .pri = SURFACE_SAM_PRIORITY_NORMAL, ++ .chn = 0x01, + .snc = 0x01, + .cdl = 0x00, + .pld = NULL, @@ -6034,458 +9887,289 @@ index 000000000000..988be7c2d286 + result.data = buf, + }; + -+ int status; -+ -+ status = surface_sam_ssh_rqst_unlocked(ec, &rqst, &result); ++ ssh_dbg(ec, "pm: suspending system aggregator module\n"); ++ status = __surface_sam_ssh_rqst(ec, &rqst, &result); + if (status) + return status; + ++ /* ++ * The purpose of the return value of this request is unknown. Based on ++ * logging and experience, we expect it to be zero. No other value has ++ * been observed so far. ++ */ + if (buf[0] != 0x00) { -+ dev_warn(&ec->serdev->dev, -+ "unexpected result while trying to suspend EC: 0x%02x\n", -+ buf[0]); ++ ssh_warn(ec, "unexpected result while trying to suspend EC: " ++ "0x%02x\n", buf[0]); + } + + return 0; +} + + -+static inline bool ssh_is_valid_syn(const u8 *ptr) ++static int surface_sam_ssh_get_controller_version(struct sam_ssh_ec *ec, u32 *version) +{ -+ return ptr[0] == 0xaa && ptr[1] == 0x55; ++ struct surface_sam_ssh_rqst rqst = { ++ .tc = 0x01, ++ .cid = 0x13, ++ .iid = 0x00, ++ .chn = 0x01, ++ .snc = 0x01, ++ .cdl = 0x00, ++ .pld = NULL, ++ }; ++ ++ struct surface_sam_ssh_buf result = { ++ result.cap = sizeof(*version), ++ result.len = 0, ++ result.data = (u8 *)version, ++ }; ++ ++ *version = 0; ++ return __surface_sam_ssh_rqst(ec, &rqst, &result); +} + -+static inline bool ssh_is_valid_ter(const u8 *ptr) ++static int surface_sam_ssh_log_controller_version(struct sam_ssh_ec *ec) +{ -+ return ptr[0] == 0xff && ptr[1] == 0xff; -+} -+ -+static inline bool ssh_is_valid_crc(const u8 *begin, const u8 *end) -+{ -+ u16 crc; -+ -+ crc = ssh_crc(begin, end - begin); -+ return (end[0] == (crc & 0xff)) && (end[1] == (crc >> 8)); -+} -+ -+ -+static int 
surface_sam_ssh_send_ack(struct sam_ssh_ec *ec, u8 seq) -+{ -+ int status; -+ u8 buf[SSH_MSG_LEN_CTRL]; -+ u16 crc; -+ -+ buf[0] = 0xaa; -+ buf[1] = 0x55; -+ buf[2] = 0x40; -+ buf[3] = 0x00; -+ buf[4] = 0x00; -+ buf[5] = seq; -+ -+ crc = ssh_crc(buf + SSH_FRAME_OFFS_CTRL, SSH_BYTELEN_CTRL); -+ buf[6] = crc & 0xff; -+ buf[7] = crc >> 8; -+ -+ buf[8] = 0xff; -+ buf[9] = 0xff; -+ -+ dev_dbg(&ec->serdev->dev, "sending message\n"); -+ print_hex_dump_debug("send: ", DUMP_PREFIX_OFFSET, 16, 1, -+ buf, SSH_MSG_LEN_CTRL, false); -+ -+ status = serdev_device_write(ec->serdev, buf, SSH_MSG_LEN_CTRL, SSH_WRITE_TIMEOUT); -+ return status >= 0 ? 0 : status; -+} -+ -+static void surface_sam_ssh_event_work_ack_handler(struct work_struct *_work) -+{ -+ struct surface_sam_ssh_event *event; -+ struct ssh_event_work *work; -+ struct sam_ssh_ec *ec; -+ struct device *dev; ++ u32 version, a, b, c; + int status; + -+ work = container_of(_work, struct ssh_event_work, work_ack); -+ event = &work->event; -+ ec = work->ec; -+ dev = &ec->serdev->dev; ++ status = surface_sam_ssh_get_controller_version(ec, &version); ++ if (status) ++ return status; + -+ /* make sure we load a fresh ec state */ -+ smp_mb(); ++ a = (version >> 24) & 0xff; ++ b = le16_to_cpu((version >> 8) & 0xffff); ++ c = version & 0xff; + -+ if (ec->state == SSH_EC_INITIALIZED) { -+ status = surface_sam_ssh_send_ack(ec, work->seq); -+ if (status) -+ dev_err(dev, SSH_EVENT_TAG "failed to send ACK: %d\n", status); -+ } -+ -+ if (refcount_dec_and_test(&work->refcount)) -+ kfree(work); ++ dev_info(&ec->serdev->dev, "SAM controller version: %u.%u.%u\n", ++ a, b, c); ++ return 0; +} + -+static void surface_sam_ssh_event_work_evt_handler(struct work_struct *_work) ++ ++static const struct acpi_gpio_params gpio_ssh_wakeup_int = { 0, 0, false }; ++static const struct acpi_gpio_params gpio_ssh_wakeup = { 1, 0, false }; ++ ++static const struct acpi_gpio_mapping ssh_acpi_gpios[] = { ++ { "ssh_wakeup-int-gpio", &gpio_ssh_wakeup_int, 1 }, 
++ { "ssh_wakeup-gpio", &gpio_ssh_wakeup, 1 }, ++ { }, ++}; ++ ++static irqreturn_t ssh_wake_irq_handler(int irq, void *dev_id) +{ -+ struct delayed_work *dwork = (struct delayed_work *)_work; -+ struct ssh_event_work *work; -+ struct surface_sam_ssh_event *event; -+ struct sam_ssh_ec *ec; -+ struct device *dev; -+ unsigned long flags; ++ struct serdev_device *serdev = dev_id; + -+ surface_sam_ssh_event_handler_fn handler; -+ void *handler_data; ++ dev_dbg(&serdev->dev, "pm: wake irq triggered\n"); + ++ // TODO: Send GPIO callback command repeatedly to EC until callback ++ // returns 0x00. Return flag of callback is "has more events". ++ // Each time the command is sent, one event is "released". Once ++ // all events have been released (return = 0x00), the GPIO is ++ // re-armed. ++ ++ return IRQ_HANDLED; ++} ++ ++static int ssh_setup_irq(struct serdev_device *serdev) ++{ ++ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING; ++ struct gpio_desc *gpiod; ++ int irq; ++ int status; ++ ++ gpiod = gpiod_get(&serdev->dev, "ssh_wakeup-int", GPIOD_ASIS); ++ if (IS_ERR(gpiod)) ++ return PTR_ERR(gpiod); ++ ++ irq = gpiod_to_irq(gpiod); ++ gpiod_put(gpiod); ++ ++ if (irq < 0) ++ return irq; ++ ++ status = request_threaded_irq(irq, NULL, ssh_wake_irq_handler, ++ irqf, "surface_sam_sh_wakeup", serdev); ++ if (status) ++ return status; ++ ++ return irq; ++} ++ ++ ++static acpi_status ssh_setup_from_resource(struct acpi_resource *rsc, void *ctx) ++{ ++ struct serdev_device *serdev = ctx; ++ struct acpi_resource_common_serialbus *serial; ++ struct acpi_resource_uart_serialbus *uart; ++ bool flow_control; + int status = 0; + -+ work = container_of(dwork, struct ssh_event_work, work_evt); -+ event = &work->event; -+ ec = work->ec; -+ dev = &ec->serdev->dev; ++ if (rsc->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) ++ return AE_OK; + -+ spin_lock_irqsave(&ec->events.lock, flags); -+ handler = ec->events.handler[event->rqid - 1].handler; -+ handler_data = 
ec->events.handler[event->rqid - 1].data; -+ spin_unlock_irqrestore(&ec->events.lock, flags); ++ serial = &rsc->data.common_serial_bus; ++ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART) ++ return AE_OK; + -+ /* -+ * During handler removal or driver release, we ensure every event gets -+ * handled before return of that function. Thus a handler obtained here is -+ * guaranteed to be valid at least until this function returns. -+ */ ++ uart = &rsc->data.uart_serial_bus; + -+ if (handler) -+ status = handler(event, handler_data); -+ else -+ dev_warn(dev, SSH_EVENT_TAG "unhandled event (rqid: %04x)\n", event->rqid); ++ // set up serdev device ++ serdev_device_set_baudrate(serdev, uart->default_baud_rate); + -+ if (status) -+ dev_err(dev, SSH_EVENT_TAG "error handling event: %d\n", status); ++ // serdev currently only supports RTSCTS flow control ++ if (uart->flow_control & SSH_SUPPORTED_FLOW_CONTROL_MASK) { ++ dev_warn(&serdev->dev, "setup: unsupported flow control" ++ " (value: 0x%02x)\n", uart->flow_control); ++ } + -+ if (refcount_dec_and_test(&work->refcount)) -+ kfree(work); ++ // set RTSCTS flow control ++ flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW; ++ serdev_device_set_flow_control(serdev, flow_control); ++ ++ // serdev currently only supports EVEN/ODD parity ++ switch (uart->parity) { ++ case ACPI_UART_PARITY_NONE: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); ++ break; ++ case ACPI_UART_PARITY_EVEN: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN); ++ break; ++ case ACPI_UART_PARITY_ODD: ++ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD); ++ break; ++ default: ++ dev_warn(&serdev->dev, "setup: unsupported parity" ++ " (value: 0x%02x)\n", uart->parity); ++ break; ++ } ++ ++ if (status) { ++ dev_err(&serdev->dev, "setup: failed to set parity" ++ " (value: 0x%02x)\n", uart->parity); ++ return status; ++ } ++ ++ return AE_CTRL_TERMINATE; // we've found the resource and are done +} + -+static 
void ssh_handle_event(struct sam_ssh_ec *ec, const u8 *buf) ++ ++static int surface_sam_ssh_suspend(struct device *dev) +{ -+ const struct ssh_frame_ctrl *ctrl; -+ const struct ssh_frame_cmd *cmd; -+ struct ssh_event_work *work; -+ unsigned long flags; -+ u16 pld_len; ++ struct sam_ssh_ec *ec; ++ int status; + -+ surface_sam_ssh_event_handler_delay delay_fn; -+ void *handler_data; -+ unsigned long delay; ++ dev_dbg(dev, "pm: suspending\n"); + -+ ctrl = (const struct ssh_frame_ctrl *)(buf + SSH_FRAME_OFFS_CTRL); -+ cmd = (const struct ssh_frame_cmd *)(buf + SSH_FRAME_OFFS_CMD); ++ ec = surface_sam_ssh_acquire_init(); ++ if (ec) { ++ status = surface_sam_ssh_ec_suspend(ec); ++ if (status) ++ return status; + -+ pld_len = ctrl->len - SSH_BYTELEN_CMDFRAME; ++ if (device_may_wakeup(dev)) { ++ status = enable_irq_wake(ec->irq); ++ if (status) ++ return status; + -+ work = kzalloc(sizeof(struct ssh_event_work) + pld_len, GFP_ATOMIC); -+ if (!work) ++ ec->irq_wakeup_enabled = true; ++ } else { ++ ec->irq_wakeup_enabled = false; ++ } ++ ++ smp_store_release(&ec->state, SSH_EC_SUSPENDED); ++ } ++ ++ return 0; ++} ++ ++static int surface_sam_ssh_resume(struct device *dev) ++{ ++ struct sam_ssh_ec *ec; ++ int status; ++ ++ dev_dbg(dev, "pm: resuming\n"); ++ ++ ec = surface_sam_ssh_acquire_init(); ++ if (ec) { ++ smp_store_release(&ec->state, SSH_EC_INITIALIZED); ++ ++ if (ec->irq_wakeup_enabled) { ++ status = disable_irq_wake(ec->irq); ++ if (status) ++ return status; ++ ++ ec->irq_wakeup_enabled = false; ++ } ++ ++ status = surface_sam_ssh_ec_resume(ec); ++ if (status) ++ return status; ++ } ++ ++ return 0; ++} ++ ++static SIMPLE_DEV_PM_OPS(surface_sam_ssh_pm_ops, surface_sam_ssh_suspend, ++ surface_sam_ssh_resume); ++ ++ ++static void ssam_handle_event(struct ssh_rtl *rtl, ++ const struct ssh_command *cmd, ++ const struct sshp_span *data) ++{ ++ struct sam_ssh_ec *ec = container_of(rtl, struct sam_ssh_ec, rtl); ++ struct ssam_event_item *item; ++ ++ item = 
kzalloc(sizeof(struct ssam_event_item) + data->len, GFP_KERNEL); ++ if (!item) + return; + -+ refcount_set(&work->refcount, 1); -+ work->ec = ec; -+ work->seq = ctrl->seq; -+ work->event.rqid = (cmd->rqid_hi << 8) | cmd->rqid_lo; -+ work->event.tc = cmd->tc; -+ work->event.cid = cmd->cid; -+ work->event.iid = cmd->iid; -+ work->event.pri = cmd->pri_in; -+ work->event.len = pld_len; -+ work->event.pld = ((u8 *)work) + sizeof(struct ssh_event_work); ++ item->rqid = get_unaligned_le16(&cmd->rqid); ++ item->event.target_category = cmd->tc; ++ item->event.command_id = cmd->cid; ++ item->event.instance_id = cmd->iid; ++ item->event.channel = cmd->chn_in; ++ item->event.length = data->len; ++ memcpy(&item->event.data[0], data->ptr, data->len); + -+ memcpy(work->event.pld, buf + SSH_FRAME_OFFS_CMD_PLD, pld_len); -+ -+ // queue ACK for if required -+ if (ctrl->type == SSH_FRAME_TYPE_CMD) { -+ refcount_set(&work->refcount, 2); -+ INIT_WORK(&work->work_ack, surface_sam_ssh_event_work_ack_handler); -+ queue_work(ec->events.queue_ack, &work->work_ack); -+ } -+ -+ spin_lock_irqsave(&ec->events.lock, flags); -+ handler_data = ec->events.handler[work->event.rqid - 1].data; -+ delay_fn = ec->events.handler[work->event.rqid - 1].delay; -+ -+ /* Note: -+ * We need to check delay_fn here: This may have never been set as we -+ * can't guarantee that events only occur when they have been enabled. -+ */ -+ delay = delay_fn ? delay_fn(&work->event, handler_data) : 0; -+ spin_unlock_irqrestore(&ec->events.lock, flags); -+ -+ // immediate execution for high priority events (e.g. 
keyboard) -+ if (delay == SURFACE_SAM_SSH_EVENT_IMMEDIATE) { -+ surface_sam_ssh_event_work_evt_handler(&work->work_evt.work); -+ } else { -+ INIT_DELAYED_WORK(&work->work_evt, surface_sam_ssh_event_work_evt_handler); -+ queue_delayed_work(ec->events.queue_evt, &work->work_evt, delay); -+ } ++ ssam_cplt_submit_event(&ec->cplt, item); +} + -+static int ssh_receive_msg_ctrl(struct sam_ssh_ec *ec, const u8 *buf, size_t size) ++static struct ssh_rtl_ops ssam_rtl_ops = { ++ .handle_event = ssam_handle_event, ++}; ++ ++ ++static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf, size_t n) +{ -+ struct device *dev = &ec->serdev->dev; -+ struct ssh_receiver *rcv = &ec->receiver; -+ const struct ssh_frame_ctrl *ctrl; -+ struct ssh_fifo_packet packet; -+ -+ const u8 *ctrl_begin = buf + SSH_FRAME_OFFS_CTRL; -+ const u8 *ctrl_end = buf + SSH_FRAME_OFFS_CTRL_CRC; -+ -+ ctrl = (const struct ssh_frame_ctrl *)(ctrl_begin); -+ -+ // actual length check -+ if (size < SSH_MSG_LEN_CTRL) -+ return 0; // need more bytes -+ -+ // validate TERM -+ if (!ssh_is_valid_ter(buf + SSH_FRAME_OFFS_TERM)) { -+ dev_err(dev, SSH_RECV_TAG "invalid end of message\n"); -+ return size; // discard everything -+ } -+ -+ // validate CRC -+ if (!ssh_is_valid_crc(ctrl_begin, ctrl_end)) { -+ dev_err(dev, SSH_RECV_TAG "invalid checksum (ctrl)\n"); -+ return SSH_MSG_LEN_CTRL; // only discard message -+ } -+ -+ // check if we expect the message -+ if (rcv->state != SSH_RCV_CONTROL) { -+ dev_err(dev, SSH_RECV_TAG "discarding message: ctrl not expected\n"); -+ return SSH_MSG_LEN_CTRL; // discard message -+ } -+ -+ // check if it is for our request -+ if (ctrl->type == SSH_FRAME_TYPE_ACK && ctrl->seq != rcv->expect.seq) { -+ dev_err(dev, SSH_RECV_TAG "discarding message: ack does not match\n"); -+ return SSH_MSG_LEN_CTRL; // discard message -+ } -+ -+ // we now have a valid & expected ACK/RETRY message -+ dev_dbg(dev, SSH_RECV_TAG "valid control message received (type: 0x%02x)\n", ctrl->type); 
-+ -+ packet.type = ctrl->type; -+ packet.seq = ctrl->seq; -+ packet.len = 0; -+ -+ if (kfifo_avail(&rcv->fifo) >= sizeof(packet)) { -+ kfifo_in(&rcv->fifo, (u8 *) &packet, sizeof(packet)); -+ -+ } else { -+ dev_warn(dev, SSH_RECV_TAG -+ "dropping frame: not enough space in fifo (type = %d)\n", -+ ctrl->type); -+ -+ return SSH_MSG_LEN_CTRL; // discard message -+ } -+ -+ // update decoder state -+ if (ctrl->type == SSH_FRAME_TYPE_ACK) { -+ rcv->state = rcv->expect.pld -+ ? SSH_RCV_COMMAND -+ : SSH_RCV_DISCARD; -+ } -+ -+ complete(&rcv->signal); -+ return SSH_MSG_LEN_CTRL; // handled message ++ struct sam_ssh_ec *ec = serdev_device_get_drvdata(dev); ++ return ssh_ptl_rx_rcvbuf(&ec->rtl.ptl, buf, n); +} + -+static int ssh_receive_msg_cmd(struct sam_ssh_ec *ec, const u8 *buf, size_t size) ++static void ssam_write_wakeup(struct serdev_device *dev) +{ -+ struct device *dev = &ec->serdev->dev; -+ struct ssh_receiver *rcv = &ec->receiver; -+ const struct ssh_frame_ctrl *ctrl; -+ const struct ssh_frame_cmd *cmd; -+ struct ssh_fifo_packet packet; -+ -+ const u8 *ctrl_begin = buf + SSH_FRAME_OFFS_CTRL; -+ const u8 *ctrl_end = buf + SSH_FRAME_OFFS_CTRL_CRC; -+ const u8 *cmd_begin = buf + SSH_FRAME_OFFS_CMD; -+ const u8 *cmd_begin_pld = buf + SSH_FRAME_OFFS_CMD_PLD; -+ const u8 *cmd_end; -+ -+ size_t msg_len; -+ -+ ctrl = (const struct ssh_frame_ctrl *)(ctrl_begin); -+ cmd = (const struct ssh_frame_cmd *)(cmd_begin); -+ -+ // we need at least a full control frame -+ if (size < (SSH_BYTELEN_SYNC + SSH_BYTELEN_CTRL + SSH_BYTELEN_CRC)) -+ return 0; // need more bytes -+ -+ // validate control-frame CRC -+ if (!ssh_is_valid_crc(ctrl_begin, ctrl_end)) { -+ dev_err(dev, SSH_RECV_TAG "invalid checksum (cmd-ctrl)\n"); -+ /* -+ * We can't be sure here if length is valid, thus -+ * discard everything. 
-+ */ -+ return size; -+ } -+ -+ // actual length check (ctrl->len contains command-frame but not crc) -+ msg_len = SSH_MSG_LEN_CMD_BASE + ctrl->len; -+ if (size < msg_len) -+ return 0; // need more bytes -+ -+ cmd_end = cmd_begin + ctrl->len; -+ -+ // validate command-frame type -+ if (cmd->type != SSH_FRAME_TYPE_CMD) { -+ dev_err(dev, SSH_RECV_TAG "expected command frame type but got 0x%02x\n", cmd->type); -+ return size; // discard everything -+ } -+ -+ // validate command-frame CRC -+ if (!ssh_is_valid_crc(cmd_begin, cmd_end)) { -+ dev_err(dev, SSH_RECV_TAG "invalid checksum (cmd-pld)\n"); -+ -+ /* -+ * The message length is provided in the control frame. As we -+ * already validated that, we can be sure here that it's -+ * correct, so we only need to discard the message. -+ */ -+ return msg_len; -+ } -+ -+ // check if we received an event notification -+ if (sam_rqid_is_event((cmd->rqid_hi << 8) | cmd->rqid_lo)) { -+ ssh_handle_event(ec, buf); -+ return msg_len; // handled message -+ } -+ -+ // check if we expect the message -+ if (rcv->state != SSH_RCV_COMMAND) { -+ dev_dbg(dev, SSH_RECV_TAG "discarding message: command not expected\n"); -+ return msg_len; // discard message -+ } -+ -+ // check if response is for our request -+ if (rcv->expect.rqid != (cmd->rqid_lo | (cmd->rqid_hi << 8))) { -+ dev_dbg(dev, SSH_RECV_TAG "discarding message: command not a match\n"); -+ return msg_len; // discard message -+ } -+ -+ // we now have a valid & expected command message -+ dev_dbg(dev, SSH_RECV_TAG "valid command message received\n"); -+ -+ packet.type = ctrl->type; -+ packet.seq = ctrl->seq; -+ packet.len = cmd_end - cmd_begin_pld; -+ -+ if (kfifo_avail(&rcv->fifo) >= sizeof(packet) + packet.len) { -+ kfifo_in(&rcv->fifo, &packet, sizeof(packet)); -+ kfifo_in(&rcv->fifo, cmd_begin_pld, packet.len); -+ -+ } else { -+ dev_warn(dev, SSH_RECV_TAG -+ "dropping frame: not enough space in fifo (type = %d)\n", -+ ctrl->type); -+ -+ return SSH_MSG_LEN_CTRL; // discard message 
-+ } -+ -+ rcv->state = SSH_RCV_DISCARD; -+ -+ complete(&rcv->signal); -+ return msg_len; // handled message ++ struct sam_ssh_ec *ec = serdev_device_get_drvdata(dev); ++ ssh_ptl_tx_wakeup(&ec->rtl.ptl, true); +} + -+static int ssh_eval_buf(struct sam_ssh_ec *ec, const u8 *buf, size_t size) -+{ -+ struct device *dev = &ec->serdev->dev; -+ struct ssh_frame_ctrl *ctrl; -+ -+ // we need at least a control frame to check what to do -+ if (size < (SSH_BYTELEN_SYNC + SSH_BYTELEN_CTRL)) -+ return 0; // need more bytes -+ -+ // make sure we're actually at the start of a new message -+ if (!ssh_is_valid_syn(buf)) { -+ dev_err(dev, SSH_RECV_TAG "invalid start of message\n"); -+ return size; // discard everything -+ } -+ -+ // handle individual message types separately -+ ctrl = (struct ssh_frame_ctrl *)(buf + SSH_FRAME_OFFS_CTRL); -+ -+ switch (ctrl->type) { -+ case SSH_FRAME_TYPE_ACK: -+ case SSH_FRAME_TYPE_RETRY: -+ return ssh_receive_msg_ctrl(ec, buf, size); -+ -+ case SSH_FRAME_TYPE_CMD: -+ case SSH_FRAME_TYPE_CMD_NOACK: -+ return ssh_receive_msg_cmd(ec, buf, size); -+ -+ default: -+ dev_err(dev, SSH_RECV_TAG "unknown frame type 0x%02x\n", ctrl->type); -+ return size; // discard everything -+ } -+} -+ -+static int ssh_receive_buf(struct serdev_device *serdev, -+ const unsigned char *buf, size_t size) -+{ -+ struct sam_ssh_ec *ec = serdev_device_get_drvdata(serdev); -+ struct ssh_receiver *rcv = &ec->receiver; -+ unsigned long flags; -+ int offs = 0; -+ int used, n; -+ -+ dev_dbg(&serdev->dev, SSH_RECV_TAG "received buffer (size: %zu)\n", size); -+ print_hex_dump_debug(SSH_RECV_TAG, DUMP_PREFIX_OFFSET, 16, 1, buf, size, false); -+ -+ /* -+ * The battery _BIX message gets a bit long, thus we have to add some -+ * additional buffering here. 
-+ */ -+ -+ spin_lock_irqsave(&rcv->lock, flags); -+ -+ // copy to eval-buffer -+ used = min(size, (size_t)(rcv->eval_buf.cap - rcv->eval_buf.len)); -+ memcpy(rcv->eval_buf.ptr + rcv->eval_buf.len, buf, used); -+ rcv->eval_buf.len += used; -+ -+ // evaluate buffer until we need more bytes or eval-buf is empty -+ while (offs < rcv->eval_buf.len) { -+ n = rcv->eval_buf.len - offs; -+ n = ssh_eval_buf(ec, rcv->eval_buf.ptr + offs, n); -+ if (n <= 0) -+ break; // need more bytes -+ -+ offs += n; -+ } -+ -+ // throw away the evaluated parts -+ rcv->eval_buf.len -= offs; -+ memmove(rcv->eval_buf.ptr, rcv->eval_buf.ptr + offs, rcv->eval_buf.len); -+ -+ spin_unlock_irqrestore(&rcv->lock, flags); -+ -+ return used; -+} ++struct serdev_device_ops ssam_serdev_ops = { ++ .receive_buf = ssam_receive_buf, ++ .write_wakeup = ssam_write_wakeup, ++}; + + +#ifdef CONFIG_SURFACE_SAM_SSH_DEBUG_DEVICE + -+#include -+ -+static char sam_ssh_debug_rqst_buf_sysfs[SURFACE_SAM_SSH_MAX_RQST_RESPONSE + 1] = { 0 }; -+static char sam_ssh_debug_rqst_buf_pld[SURFACE_SAM_SSH_MAX_RQST_PAYLOAD] = { 0 }; -+static char sam_ssh_debug_rqst_buf_res[SURFACE_SAM_SSH_MAX_RQST_RESPONSE] = { 0 }; ++static char sam_ssh_debug_rqst_buf_sysfs[256] = { 0 }; ++static char sam_ssh_debug_rqst_buf_pld[255] = { 0 }; ++static char sam_ssh_debug_rqst_buf_res[255] = { 0 }; + +struct sysfs_rqst { + u8 tc; + u8 cid; + u8 iid; -+ u8 pri; ++ u8 chn; + u8 snc; + u8 cdl; + u8 pld[0]; @@ -6494,7 +10178,7 @@ index 000000000000..988be7c2d286 +static ssize_t rqst_read(struct file *f, struct kobject *kobj, struct bin_attribute *attr, + char *buf, loff_t offs, size_t count) +{ -+ if (offs < 0 || count + offs > SURFACE_SAM_SSH_MAX_RQST_RESPONSE) ++ if (offs < 0 || count + offs > ARRAY_SIZE(sam_ssh_debug_rqst_buf_sysfs)) + return -EINVAL; + + memcpy(buf, sam_ssh_debug_rqst_buf_sysfs + offs, count); @@ -6510,7 +10194,7 @@ index 000000000000..988be7c2d286 + int status; + + // check basic write constriants -+ if (offs != 0 || count > 
SURFACE_SAM_SSH_MAX_RQST_PAYLOAD + sizeof(struct sysfs_rqst)) ++ if (offs != 0 || count - sizeof(struct sysfs_rqst) > ARRAY_SIZE(sam_ssh_debug_rqst_buf_pld)) + return -EINVAL; + + if (count < sizeof(struct sysfs_rqst)) @@ -6525,13 +10209,13 @@ index 000000000000..988be7c2d286 + rqst.tc = input->tc; + rqst.cid = input->cid; + rqst.iid = input->iid; -+ rqst.pri = input->pri; ++ rqst.chn = input->chn; + rqst.snc = input->snc; + rqst.cdl = input->cdl; + rqst.pld = sam_ssh_debug_rqst_buf_pld; + memcpy(sam_ssh_debug_rqst_buf_pld, &input->pld[0], input->cdl); + -+ result.cap = SURFACE_SAM_SSH_MAX_RQST_RESPONSE; ++ result.cap = ARRAY_SIZE(sam_ssh_debug_rqst_buf_res); + result.len = 0; + result.data = sam_ssh_debug_rqst_buf_res; + @@ -6542,13 +10226,12 @@ index 000000000000..988be7c2d286 + sam_ssh_debug_rqst_buf_sysfs[0] = result.len; + memcpy(sam_ssh_debug_rqst_buf_sysfs + 1, result.data, result.len); + memset(sam_ssh_debug_rqst_buf_sysfs + result.len + 1, 0, -+ SURFACE_SAM_SSH_MAX_RQST_RESPONSE + 1 - result.len); ++ ARRAY_SIZE(sam_ssh_debug_rqst_buf_sysfs) + 1 - result.len); + + return count; +} + -+static const BIN_ATTR_RW(rqst, SURFACE_SAM_SSH_MAX_RQST_RESPONSE + 1); -+ ++static const BIN_ATTR_RW(rqst, ARRAY_SIZE(sam_ssh_debug_rqst_buf_sysfs)); + +static int surface_sam_ssh_sysfs_register(struct device *dev) +{ @@ -6560,7 +10243,7 @@ index 000000000000..988be7c2d286 + sysfs_remove_bin_file(&dev->kobj, &bin_attr_rqst); +} + -+#else /* CONFIG_SURFACE_ACPI_SSH_DEBUG_DEVICE */ ++#else /* CONFIG_SURFACE_SAM_SSH_DEBUG_DEVICE */ + +static int surface_sam_ssh_sysfs_register(struct device *dev) +{ @@ -6571,275 +10254,54 @@ index 000000000000..988be7c2d286 +{ +} + -+#endif /* CONFIG_SURFACE_SAM_SSH_DEBUG_DEVICE */ ++#endif /* CONFIG_SURFACE_SAM_SSH_DEBUG_DEVICE */ + + -+static const struct acpi_gpio_params gpio_sam_wakeup_int = { 0, 0, false }; -+static const struct acpi_gpio_params gpio_sam_wakeup = { 1, 0, false }; -+ -+static const struct acpi_gpio_mapping 
surface_sam_acpi_gpios[] = { -+ { "sam_wakeup-int-gpio", &gpio_sam_wakeup_int, 1 }, -+ { "sam_wakeup-gpio", &gpio_sam_wakeup, 1 }, -+ { }, -+}; -+ -+static irqreturn_t surface_sam_irq_handler(int irq, void *dev_id) -+{ -+ struct serdev_device *serdev = dev_id; -+ -+ dev_info(&serdev->dev, "wake irq triggered\n"); -+ return IRQ_HANDLED; -+} -+ -+static int surface_sam_setup_irq(struct serdev_device *serdev) -+{ -+ const int irqf = IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_RISING; -+ struct gpio_desc *gpiod; -+ int irq; -+ int status; -+ -+ gpiod = gpiod_get(&serdev->dev, "sam_wakeup-int", GPIOD_ASIS); -+ if (IS_ERR(gpiod)) -+ return PTR_ERR(gpiod); -+ -+ irq = gpiod_to_irq(gpiod); -+ gpiod_put(gpiod); -+ -+ if (irq < 0) -+ return irq; -+ -+ status = request_threaded_irq(irq, NULL, surface_sam_irq_handler, -+ irqf, "surface_sam_wakeup", serdev); -+ if (status) -+ return status; -+ -+ return irq; -+} -+ -+ -+static acpi_status -+ssh_setup_from_resource(struct acpi_resource *resource, void *context) -+{ -+ struct serdev_device *serdev = context; -+ struct acpi_resource_common_serialbus *serial; -+ struct acpi_resource_uart_serialbus *uart; -+ int status = 0; -+ -+ if (resource->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) -+ return AE_OK; -+ -+ serial = &resource->data.common_serial_bus; -+ if (serial->type != ACPI_RESOURCE_SERIAL_TYPE_UART) -+ return AE_OK; -+ -+ uart = &resource->data.uart_serial_bus; -+ -+ // set up serdev device -+ serdev_device_set_baudrate(serdev, uart->default_baud_rate); -+ -+ // serdev currently only supports RTSCTS flow control -+ if (uart->flow_control & SSH_SUPPORTED_FLOW_CONTROL_MASK) -+ dev_warn(&serdev->dev, "unsupported flow control (value: 0x%02x)\n", uart->flow_control); -+ -+ // set RTSCTS flow control -+ serdev_device_set_flow_control(serdev, uart->flow_control & ACPI_UART_FLOW_CONTROL_HW); -+ -+ // serdev currently only supports EVEN/ODD parity -+ switch (uart->parity) { -+ case ACPI_UART_PARITY_NONE: -+ status = 
serdev_device_set_parity(serdev, SERDEV_PARITY_NONE); -+ break; -+ case ACPI_UART_PARITY_EVEN: -+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN); -+ break; -+ case ACPI_UART_PARITY_ODD: -+ status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD); -+ break; -+ default: -+ dev_warn(&serdev->dev, "unsupported parity (value: 0x%02x)\n", uart->parity); -+ break; -+ } -+ -+ if (status) { -+ dev_err(&serdev->dev, "failed to set parity (value: 0x%02x)\n", uart->parity); -+ return status; -+ } -+ -+ return AE_CTRL_TERMINATE; // we've found the resource and are done -+} -+ -+ -+static int surface_sam_ssh_suspend(struct device *dev) -+{ -+ struct sam_ssh_ec *ec; -+ int status; -+ -+ dev_dbg(dev, "suspending\n"); -+ -+ ec = surface_sam_ssh_acquire_init(); -+ if (ec) { -+ status = surface_sam_ssh_ec_suspend(ec); -+ if (status) { -+ surface_sam_ssh_release(ec); -+ return status; -+ } -+ -+ if (device_may_wakeup(dev)) { -+ status = enable_irq_wake(ec->irq); -+ if (status) { -+ surface_sam_ssh_release(ec); -+ return status; -+ } -+ -+ ec->irq_wakeup_enabled = true; -+ } else { -+ ec->irq_wakeup_enabled = false; -+ } -+ -+ ec->state = SSH_EC_SUSPENDED; -+ surface_sam_ssh_release(ec); -+ } -+ -+ return 0; -+} -+ -+static int surface_sam_ssh_resume(struct device *dev) -+{ -+ struct sam_ssh_ec *ec; -+ int status; -+ -+ dev_dbg(dev, "resuming\n"); -+ -+ ec = surface_sam_ssh_acquire_init(); -+ if (ec) { -+ ec->state = SSH_EC_INITIALIZED; -+ -+ if (ec->irq_wakeup_enabled) { -+ status = disable_irq_wake(ec->irq); -+ if (status) { -+ surface_sam_ssh_release(ec); -+ return status; -+ } -+ -+ ec->irq_wakeup_enabled = false; -+ } -+ -+ status = surface_sam_ssh_ec_resume(ec); -+ if (status) { -+ surface_sam_ssh_release(ec); -+ return status; -+ } -+ -+ surface_sam_ssh_release(ec); -+ } -+ -+ return 0; -+} -+ -+static SIMPLE_DEV_PM_OPS(surface_sam_ssh_pm_ops, surface_sam_ssh_suspend, surface_sam_ssh_resume); -+ -+ -+static const struct serdev_device_ops ssh_device_ops = { -+ 
.receive_buf = ssh_receive_buf, -+ .write_wakeup = serdev_device_write_wakeup, -+}; -+ -+ -+static int surface_sam_ssh_sysfs_register(struct device *dev); -+static void surface_sam_ssh_sysfs_unregister(struct device *dev); -+ +static int surface_sam_ssh_probe(struct serdev_device *serdev) +{ + struct sam_ssh_ec *ec; -+ struct workqueue_struct *event_queue_ack; -+ struct workqueue_struct *event_queue_evt; -+ u8 *write_buf; -+ u8 *read_buf; -+ u8 *eval_buf; + acpi_handle *ssh = ACPI_HANDLE(&serdev->dev); -+ acpi_status status; -+ int irq; -+ -+ dev_dbg(&serdev->dev, "probing\n"); ++ int status, irq; + + if (gpiod_count(&serdev->dev, NULL) < 0) + return -ENODEV; + -+ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, surface_sam_acpi_gpios); ++ status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssh_acpi_gpios); + if (status) + return status; + -+ // allocate buffers -+ write_buf = kzalloc(SSH_WRITE_BUF_LEN, GFP_KERNEL); -+ if (!write_buf) { -+ status = -ENOMEM; -+ goto err_write_buf; -+ } -+ -+ read_buf = kzalloc(SSH_READ_BUF_LEN, GFP_KERNEL); -+ if (!read_buf) { -+ status = -ENOMEM; -+ goto err_read_buf; -+ } -+ -+ eval_buf = kzalloc(SSH_EVAL_BUF_LEN, GFP_KERNEL); -+ if (!eval_buf) { -+ status = -ENOMEM; -+ goto err_eval_buf; -+ } -+ -+ event_queue_ack = create_singlethread_workqueue("surface_sh_ackq"); -+ if (!event_queue_ack) { -+ status = -ENOMEM; -+ goto err_ackq; -+ } -+ -+ event_queue_evt = create_workqueue("surface_sh_evtq"); -+ if (!event_queue_evt) { -+ status = -ENOMEM; -+ goto err_evtq; -+ } -+ -+ irq = surface_sam_setup_irq(serdev); -+ if (irq < 0) { -+ status = irq; -+ goto err_irq; -+ } ++ // setup IRQ ++ irq = ssh_setup_irq(serdev); ++ if (irq < 0) ++ return irq; + + // set up EC + ec = surface_sam_ssh_acquire(); -+ if (ec->state != SSH_EC_UNINITIALIZED) { ++ if (smp_load_acquire(&ec->state) != SSH_EC_UNINITIALIZED) { + dev_err(&serdev->dev, "embedded controller already initialized\n"); -+ surface_sam_ssh_release(ec); + + status = -EBUSY; -+ goto 
err_busy; ++ goto err_ecinit; + } + -+ ec->serdev = serdev; -+ ec->irq = irq; -+ ec->writer.data = write_buf; -+ ec->writer.ptr = write_buf; ++ ec->serdev = serdev; ++ ec->irq = irq; ++ ssh_seq_reset(&ec->counter.seq); ++ ssh_rqid_reset(&ec->counter.rqid); + -+ // initialize receiver -+ init_completion(&ec->receiver.signal); -+ kfifo_init(&ec->receiver.fifo, read_buf, SSH_READ_BUF_LEN); -+ ec->receiver.eval_buf.ptr = eval_buf; -+ ec->receiver.eval_buf.cap = SSH_EVAL_BUF_LEN; -+ ec->receiver.eval_buf.len = 0; ++ // initialize event/request completion system ++ status = ssam_cplt_init(&ec->cplt, &serdev->dev); ++ if (status) ++ goto err_ecinit; + -+ // initialize event handling -+ ec->events.queue_ack = event_queue_ack; -+ ec->events.queue_evt = event_queue_evt; -+ -+ ec->state = SSH_EC_INITIALIZED; ++ // initialize request and packet transmission layers ++ status = ssh_rtl_init(&ec->rtl, serdev, &ssam_rtl_ops); ++ if (status) ++ goto err_rtl; + + serdev_device_set_drvdata(serdev, ec); + -+ /* ensure everything is properly set-up before we open the device */ -+ smp_mb(); -+ -+ serdev_device_set_client_ops(serdev, &ssh_device_ops); ++ serdev_device_set_client_ops(serdev, &ssam_serdev_ops); + status = serdev_device_open(serdev); + if (status) + goto err_open; @@ -6849,15 +10311,27 @@ index 000000000000..988be7c2d286 + if (ACPI_FAILURE(status)) + goto err_devinit; + -+ status = surface_sam_ssh_ec_resume(ec); ++ status = ssh_rtl_tx_start(&ec->rtl); + if (status) + goto err_devinit; + ++ status = ssh_rtl_rx_start(&ec->rtl); ++ if (status) ++ goto err_devinit; ++ ++ smp_store_release(&ec->state, SSH_EC_INITIALIZED); ++ ++ status = surface_sam_ssh_log_controller_version(ec); ++ if (status) ++ goto err_finalize; ++ ++ status = surface_sam_ssh_ec_resume(ec); ++ if (status) ++ goto err_finalize; ++ + status = surface_sam_ssh_sysfs_register(&serdev->dev); + if (status) -+ goto err_devinit; -+ -+ surface_sam_ssh_release(ec); ++ goto err_finalize; + + // TODO: The EC can wake up 
the system via the associated GPIO interrupt in + // multiple situations. One of which is the remaining battery capacity @@ -6872,32 +10346,26 @@ index 000000000000..988be7c2d286 + + return 0; + ++err_finalize: ++ smp_store_release(&ec->state, SSH_EC_UNINITIALIZED); ++ ssh_rtl_flush(&ec->rtl, msecs_to_jiffies(5000)); +err_devinit: + serdev_device_close(serdev); +err_open: -+ ec->state = SSH_EC_UNINITIALIZED; -+ serdev_device_set_drvdata(serdev, NULL); -+ surface_sam_ssh_release(ec); -+err_busy: ++ ssh_rtl_shutdown(&ec->rtl); ++ ssh_rtl_destroy(&ec->rtl); ++err_rtl: ++ ssam_cplt_flush(&ec->cplt); ++ ssam_cplt_destroy(&ec->cplt); ++err_ecinit: + free_irq(irq, serdev); -+err_irq: -+ destroy_workqueue(event_queue_evt); -+err_evtq: -+ destroy_workqueue(event_queue_ack); -+err_ackq: -+ kfree(eval_buf); -+err_eval_buf: -+ kfree(read_buf); -+err_read_buf: -+ kfree(write_buf); -+err_write_buf: ++ serdev_device_set_drvdata(serdev, NULL); + return status; +} + +static void surface_sam_ssh_remove(struct serdev_device *serdev) +{ + struct sam_ssh_ec *ec; -+ unsigned long flags; + int status; + + ec = surface_sam_ssh_acquire_init(); @@ -6912,60 +10380,41 @@ index 000000000000..988be7c2d286 + if (status) + dev_err(&serdev->dev, "failed to suspend EC: %d\n", status); + -+ // make sure all events (received up to now) have been properly handled -+ flush_workqueue(ec->events.queue_ack); -+ flush_workqueue(ec->events.queue_evt); ++ // flush pending events and requests while everything still works ++ status = ssh_rtl_flush(&ec->rtl, msecs_to_jiffies(5000)); ++ if (status) ++ dev_err(&serdev->dev, "failed to flush request transmission layer: %d\n", status); + -+ // remove event handlers -+ spin_lock_irqsave(&ec->events.lock, flags); -+ memset(ec->events.handler, 0, -+ sizeof(struct ssh_event_handler) -+ * SAM_NUM_EVENT_TYPES); -+ spin_unlock_irqrestore(&ec->events.lock, flags); ++ ssam_cplt_flush(&ec->cplt); ++ ++ // mark device as uninitialized ++ smp_store_release(&ec->state, 
SSH_EC_UNINITIALIZED); ++ ++ // cancel rem. requests, ensure no new ones can be queued, stop threads ++ ssh_rtl_tx_flush(&ec->rtl); ++ ssh_rtl_shutdown(&ec->rtl); ++ ++ // shut down actual transport ++ serdev_device_wait_until_sent(ec->serdev, 0); ++ serdev_device_close(ec->serdev); ++ ++ /* ++ * Ensure _all_ events are completed. New ones could still have been ++ * received after the last flush, before the request transport layer ++ * has been shut down. At this point we can be sure that no requests ++ * will remain after this call. ++ */ ++ ssam_cplt_flush(&ec->cplt); ++ ++ // actually free resources ++ ssam_cplt_destroy(&ec->cplt); ++ ssh_rtl_destroy(&ec->rtl); + -+ // set device to deinitialized state -+ ec->state = SSH_EC_UNINITIALIZED; + ec->serdev = NULL; -+ -+ /* ensure state and serdev get set before continuing */ -+ smp_mb(); -+ -+ /* -+ * Flush any event that has not been processed yet to ensure we're not going to -+ * use the serial device any more (e.g. for ACKing). -+ */ -+ flush_workqueue(ec->events.queue_ack); -+ flush_workqueue(ec->events.queue_evt); -+ -+ serdev_device_close(serdev); -+ -+ /* -+ * Only at this point, no new events can be received. Destroying the -+ * workqueue here flushes all remaining events. Those events will be -+ * silently ignored and neither ACKed nor any handler gets called. 
-+ */ -+ destroy_workqueue(ec->events.queue_ack); -+ destroy_workqueue(ec->events.queue_evt); -+ -+ // free writer -+ kfree(ec->writer.data); -+ ec->writer.data = NULL; -+ ec->writer.ptr = NULL; -+ -+ // free receiver -+ spin_lock_irqsave(&ec->receiver.lock, flags); -+ ec->receiver.state = SSH_RCV_DISCARD; -+ kfifo_free(&ec->receiver.fifo); -+ -+ kfree(ec->receiver.eval_buf.ptr); -+ ec->receiver.eval_buf.ptr = NULL; -+ ec->receiver.eval_buf.cap = 0; -+ ec->receiver.eval_buf.len = 0; -+ spin_unlock_irqrestore(&ec->receiver.lock, flags); ++ ec->irq = -1; + + device_set_wakeup_capable(&serdev->dev, false); + serdev_device_set_drvdata(serdev, NULL); -+ surface_sam_ssh_release(ec); +} + + @@ -6980,7 +10429,7 @@ index 000000000000..988be7c2d286 + .remove = surface_sam_ssh_remove, + .driver = { + .name = "surface_sam_ssh", -+ .acpi_match_table = ACPI_PTR(surface_sam_ssh_match), ++ .acpi_match_table = surface_sam_ssh_match, + .pm = &surface_sam_ssh_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, @@ -7012,21 +10461,21 @@ index 000000000000..988be7c2d286 + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Surface Serial Hub Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh.h b/drivers/platform/x86/surface_sam/surface_sam_ssh.h new file mode 100644 -index 000000000000..435b5c7bac9a +index 0000000000000..e4f0e343496c4 --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_ssh.h -@@ -0,0 +1,98 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ +@@ -0,0 +1,482 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Interface for Surface Serial Hub (SSH). + * + * The SSH is the main communication hub for communication between host and + * the Surface/System Aggregator Module (SAM) on newer Microsoft Surface + * devices (Book 2, Pro 5, Laptops, ...). Also referred to as SAM-over-SSH. -+ * Older devices (Book 1, Pro 4) use SAM-over-I2C. 
++ * Older devices (Book 1, Pro 4) use SAM-over-HID (via I2C). + */ + +#ifndef _SURFACE_SAM_SSH_H @@ -7036,11 +10485,414 @@ index 000000000000..435b5c7bac9a +#include + + -+/* -+ * Maximum request payload size in bytes. -+ * Value based on ACPI (255 bytes minus header/status bytes). ++/* -- Data structures for SAM-over-SSH communication. ----------------------- */ ++ ++/** ++ * enum ssh_frame_type - Frame types for SSH frames. ++ * @SSH_FRAME_TYPE_DATA_SEQ: Indicates a data frame, followed by a payload with ++ * the length specified in the ssh_frame.len field. This ++ * frame is sequenced, meaning that an ACK is required. ++ * @SSH_FRAME_TYPE_DATA_NSQ: Same as SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, ++ * meaning that the message does not have to be ACKed. ++ * @SSH_FRAME_TYPE_ACK: Indicates an ACK message. ++ * @SSH_FRAME_TYPE_NAK: Indicates an error response for previously sent ++ * frame. In general, this means that the frame and/or ++ * payload is malformed, e.g. a CRC is wrong. For command- ++ * type payloads, this can also mean that the command is ++ * invalid. + */ -+#define SURFACE_SAM_SSH_MAX_RQST_PAYLOAD (255 - 10) ++enum ssh_frame_type { ++ SSH_FRAME_TYPE_DATA_SEQ = 0x80, ++ SSH_FRAME_TYPE_DATA_NSQ = 0x00, ++ SSH_FRAME_TYPE_ACK = 0x40, ++ SSH_FRAME_TYPE_NAK = 0x04, ++}; ++ ++/** ++ * struct ssh_frame - SSH communication frame. ++ * @type: The type of the frame. See &enum ssh_frame_type. ++ * @len: The length of the frame payload directly following the CRC for this ++ * frame. Does not include the final CRC for that payload. ++ * @seq: The sequence number for this message/exchange. ++ */ ++struct ssh_frame { ++ u8 type; ++ __le16 len; ++ u8 seq; ++} __packed; ++ ++/* ++ * Maximum SSH frame payload length in bytes. This is the physical maximum ++ * length of the protocol. Implementations may set a more constrained limit. ++ */ ++#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX ++ ++/** ++ * enum ssh_payload_type - Type indicator for the SSH payload. 
++ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command ++ * payload. ++ */ ++enum ssh_payload_type { ++ SSH_PLD_TYPE_CMD = 0x80, ++}; ++ ++/** ++ * struct ssh_command - Payload of a command-type frame. ++ * @type: The type of the payload. See &enum ssh_payload_type. Should be ++ * SSH_PLD_TYPE_CMD for this struct. ++ * @tc: Command target category. ++ * @chn_out: Output channel. Should be zero if this an incoming (EC to host) ++ * message. ++ * @chn_in: Input channel. Should be zero if this is an outgoing (hos to EC) ++ * message. ++ * @iid: Instance ID. ++ * @rqid: Request ID. Used to match requests with responses and differentiate ++ * between responses and events. ++ * @cid: Command ID. ++ */ ++struct ssh_command { ++ u8 type; ++ u8 tc; ++ u8 chn_out; ++ u8 chn_in; ++ u8 iid; ++ __le16 rqid; ++ u8 cid; ++} __packed; ++ ++/* ++ * Maximum SSH command payload length in bytes. This is the physical maximum ++ * length of the protocol. Implementations may set a more constrained limit. ++ */ ++#define SSH_COMMAND_MAX_PAYLOAD_SIZE \ ++ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command)) ++ ++/** ++ * struct ssh_notification_params - Command payload to enable/disable SSH ++ * notifications. ++ * @target_category: The target category for which notifications should be ++ * enabled/disabled. ++ * @flags: Flags determining how notifications are being sent. ++ * @request_id: The request ID that is used to send these notifications. ++ * @instance_id: The specific instance in the given target category for ++ * which notifications should be enabled. ++ */ ++struct ssh_notification_params { ++ u8 target_category; ++ u8 flags; ++ __le16 request_id; ++ u8 instance_id; ++} __packed; ++ ++/** ++ * SSH message syncrhonization (SYN) bytes. ++ */ ++#define SSH_MSG_SYN ((u16)0x55aa) ++ ++/** ++ * Base-length of a SSH message. This is the minimum number of bytes required ++ * to form a message. 
The actual message length is SSH_MSG_LEN_BASE plus the ++ * length of the frame payload. ++ */ ++#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16)) ++ ++/** ++ * Length of a SSH control message. ++ */ ++#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE ++ ++/** ++ * Length of a SSH message with payload of specified size. ++ */ ++#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + payload_size) ++ ++/** ++ * Length of a SSH command message with command payload of specified size. ++ */ ++#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \ ++ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + payload_size) ++ ++/** ++ * Offset of the specified struct ssh_frame field in the raw SSH message data. ++ */ ++#define SSH_MSGOFFSET_FRAME(field) \ ++ (sizeof(u16) + offsetof(struct ssh_frame, field)) ++ ++/** ++ * Offset of the specified struct ssh_command field in the raw SSH message data. ++ */ ++#define SSH_MSGOFFSET_COMMAND(field) \ ++ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \ ++ + offsetof(struct ssh_command, field)) ++ ++struct sshp_span { ++ u8 *ptr; ++ size_t len; ++}; ++ ++ ++/* -- Packet transport layer (ptl). 
----------------------------------------- */ ++ ++enum ssh_packet_priority { ++ SSH_PACKET_PRIORITY_FLUSH = 0, ++ SSH_PACKET_PRIORITY_DATA = 0, ++ SSH_PACKET_PRIORITY_NAK = 1 << 4, ++ SSH_PACKET_PRIORITY_ACK = 2 << 4, ++}; ++ ++#define SSH_PACKET_PRIORITY(base, try) \ ++ ((SSH_PACKET_PRIORITY_##base) | ((try) & 0x0f)) ++ ++#define ssh_packet_priority_get_try(p) ((p) & 0x0f) ++ ++ ++enum ssh_packet_type_flags { ++ SSH_PACKET_TY_FLUSH_BIT, ++ SSH_PACKET_TY_SEQUENCED_BIT, ++ SSH_PACKET_TY_BLOCKING_BIT, ++ ++ SSH_PACKET_TY_FLUSH = BIT(SSH_PACKET_TY_FLUSH_BIT), ++ SSH_PACKET_TY_SEQUENCED = BIT(SSH_PACKET_TY_SEQUENCED_BIT), ++ SSH_PACKET_TY_BLOCKING = BIT(SSH_PACKET_TY_BLOCKING_BIT), ++}; ++ ++enum ssh_packet_state_flags { ++ SSH_PACKET_SF_LOCKED_BIT, ++ SSH_PACKET_SF_QUEUED_BIT, ++ SSH_PACKET_SF_PENDING_BIT, ++ SSH_PACKET_SF_TRANSMITTING_BIT, ++ SSH_PACKET_SF_TRANSMITTED_BIT, ++ SSH_PACKET_SF_ACKED_BIT, ++ SSH_PACKET_SF_CANCELED_BIT, ++ SSH_PACKET_SF_COMPLETED_BIT, ++}; ++ ++ ++struct ssh_ptl; ++struct ssh_packet; ++ ++struct ssh_packet_ops { ++ void (*release)(struct ssh_packet *packet); ++ void (*complete)(struct ssh_packet *packet, int status); ++}; ++ ++struct ssh_packet { ++ struct ssh_ptl *ptl; ++ struct kref refcnt; ++ ++ u8 type; ++ u8 priority; ++ u16 data_length; ++ u8 *data; ++ ++ unsigned long state; ++ ktime_t timestamp; ++ ++ struct list_head queue_node; ++ struct list_head pending_node; ++ ++ const struct ssh_packet_ops *ops; ++}; ++ ++ ++/* -- Request transport layer (rtl). 
---------------------------------------- */ ++ ++enum ssh_request_flags { ++ SSH_REQUEST_SF_LOCKED_BIT, ++ SSH_REQUEST_SF_QUEUED_BIT, ++ SSH_REQUEST_SF_PENDING_BIT, ++ SSH_REQUEST_SF_TRANSMITTING_BIT, ++ SSH_REQUEST_SF_TRANSMITTED_BIT, ++ SSH_REQUEST_SF_RSPRCVD_BIT, ++ SSH_REQUEST_SF_CANCELED_BIT, ++ SSH_REQUEST_SF_COMPLETED_BIT, ++ ++ SSH_REQUEST_TY_FLUSH_BIT, ++ SSH_REQUEST_TY_HAS_RESPONSE_BIT, ++ ++ SSH_REQUEST_FLAGS_SF_MASK = ++ BIT(SSH_REQUEST_SF_LOCKED_BIT) ++ | BIT(SSH_REQUEST_SF_QUEUED_BIT) ++ | BIT(SSH_REQUEST_SF_PENDING_BIT) ++ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT) ++ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT) ++ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT) ++ | BIT(SSH_REQUEST_SF_CANCELED_BIT) ++ | BIT(SSH_REQUEST_SF_COMPLETED_BIT), ++ ++ SSH_REQUEST_FLAGS_TY_MASK = ++ BIT(SSH_REQUEST_TY_FLUSH_BIT) ++ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), ++}; ++ ++ ++struct ssh_rtl; ++struct ssh_request; ++ ++struct ssh_request_ops { ++ void (*release)(struct ssh_request *rqst); ++ void (*complete)(struct ssh_request *rqst, ++ const struct ssh_command *cmd, ++ const struct sshp_span *data, int status); ++}; ++ ++struct ssh_request { ++ struct ssh_rtl *rtl; ++ struct ssh_packet packet; ++ struct list_head node; ++ ++ unsigned long state; ++ ktime_t timestamp; ++ ++ const struct ssh_request_ops *ops; ++}; ++ ++ ++/* -- Main data types and definitions --------------------------------------- */ ++ ++enum ssam_ssh_tc { ++ SSAM_SSH_TC_SAM = 0x01, // generic system functionality, real-time clock ++ SSAM_SSH_TC_BAT = 0x02, // battery/power subsystem ++ SSAM_SSH_TC_TMP = 0x03, // thermal subsystem ++ SSAM_SSH_TC_PMC = 0x04, ++ SSAM_SSH_TC_FAN = 0x05, ++ SSAM_SSH_TC_PoM = 0x06, ++ SSAM_SSH_TC_DBG = 0x07, ++ SSAM_SSH_TC_KBD = 0x08, // legacy keyboard (Laptop 1/2) ++ SSAM_SSH_TC_FWU = 0x09, ++ SSAM_SSH_TC_UNI = 0x0a, ++ SSAM_SSH_TC_LPC = 0x0b, ++ SSAM_SSH_TC_TCL = 0x0c, ++ SSAM_SSH_TC_SFL = 0x0d, ++ SSAM_SSH_TC_KIP = 0x0e, ++ SSAM_SSH_TC_EXT = 0x0f, ++ SSAM_SSH_TC_BLD = 0x10, ++ SSAM_SSH_TC_BAS 
= 0x11, // detachment system (Surface Book 2/3) ++ SSAM_SSH_TC_SEN = 0x12, ++ SSAM_SSH_TC_SRQ = 0x13, ++ SSAM_SSH_TC_MCU = 0x14, ++ SSAM_SSH_TC_HID = 0x15, // generic HID input subsystem ++ SSAM_SSH_TC_TCH = 0x16, ++ SSAM_SSH_TC_BKL = 0x17, ++ SSAM_SSH_TC_TAM = 0x18, ++ SSAM_SSH_TC_ACC = 0x19, ++ SSAM_SSH_TC_UFI = 0x1a, ++ SSAM_SSH_TC_USC = 0x1b, ++ SSAM_SSH_TC_PEN = 0x1c, ++ SSAM_SSH_TC_VID = 0x1d, ++ SSAM_SSH_TC_AUD = 0x1e, ++ SSAM_SSH_TC_SMC = 0x1f, ++ SSAM_SSH_TC_KPD = 0x20, ++ SSAM_SSH_TC_REG = 0x21, ++}; ++ ++/** ++ * struct ssam_event_flags - Flags for enabling/disabling SAM-over-SSH events ++ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame. ++ */ ++enum ssam_event_flags { ++ SSAM_EVENT_SEQUENCED = BIT(0), ++}; ++ ++struct ssam_event { ++ u8 target_category; ++ u8 command_id; ++ u8 instance_id; ++ u8 channel; ++ u16 length; ++ u8 data[0]; ++}; ++ ++ ++/* -- Event notifier/callbacks. --------------------------------------------- */ ++ ++#define SSAM_NOTIF_STATE_SHIFT 2 ++#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1) ++ ++#define SSAM_NOTIF_HANDLED BIT(0) ++#define SSAM_NOTIF_STOP BIT(1) ++ ++ ++struct ssam_notifier_block; ++ ++typedef u32 (*ssam_notifier_fn_t)(struct ssam_notifier_block *nb, ++ const struct ssam_event *event); ++ ++struct ssam_notifier_block { ++ struct ssam_notifier_block __rcu *next; ++ ssam_notifier_fn_t fn; ++ int priority; ++}; ++ ++ ++static inline u32 ssam_notifier_from_errno(int err) ++{ ++ WARN_ON(err > 0); ++ ++ if (err >= 0) ++ return 0; ++ else ++ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP; ++} ++ ++static inline int ssam_notifier_to_errno(u32 ret) ++{ ++ return -(ret >> SSAM_NOTIF_STATE_SHIFT); ++} ++ ++ ++/* -- Event/notification registry. 
------------------------------------------ */ ++ ++struct ssam_event_registry { ++ u8 target_category; ++ u8 channel; ++ u8 cid_enable; ++ u8 cid_disable; ++}; ++ ++struct ssam_event_id { ++ u8 target_category; ++ u8 instance; ++}; ++ ++ ++#define SSAM_EVENT_REGISTRY(tc, chn, cid_en, cid_dis) \ ++ ((struct ssam_event_registry) { \ ++ .target_category = (tc), \ ++ .channel = (chn), \ ++ .cid_enable = (cid_en), \ ++ .cid_disable = (cid_dis), \ ++ }) ++ ++#define SSAM_EVENT_ID(tc, iid) \ ++ ((struct ssam_event_id) { \ ++ .target_category = tc, \ ++ .instance = iid, \ ++ }) ++ ++ ++#define SSAM_EVENT_REGISTRY_SAM \ ++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c) ++ ++#define SSAM_EVENT_REGISTRY_KIP \ ++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28) ++ ++#define SSAM_EVENT_REGISTRY_REG \ ++ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02) ++ ++ ++struct ssam_event_notifier { ++ struct ssam_notifier_block base; ++ ++ struct { ++ struct ssam_event_registry reg; ++ struct ssam_event_id id; ++ u8 flags; ++ } event; ++}; ++ ++ ++/* -- TODO -------------------------------------------------------------------*/ + +/* + * Maximum response payload size in bytes. @@ -7049,22 +10901,17 @@ index 000000000000..435b5c7bac9a +#define SURFACE_SAM_SSH_MAX_RQST_RESPONSE (255 - 4) + +/* -+ * The number of (lower) bits of the request ID (RQID) reserved for events. -+ * These bits may only be used exclusively for events sent from the EC to the -+ * host. ++ * The number of reserved event IDs, used for registering an SSH event ++ * handler. Valid event IDs are numbers below or equal to this value, with ++ * exception of zero, which is not an event ID. Thus, this is also the ++ * absolute maximum number of event handlers that can be registered. 
+ */ -+#define SURFACE_SAM_SSH_RQID_EVENT_BITS 5 ++#define SURFACE_SAM_SSH_NUM_EVENTS 0x22 + +/* -+ * Special event-handler delay value indicating that the corresponding event -+ * should be handled immediately in the interrupt and not be relayed through -+ * the workqueue. Intended for low-latency events, such as keyboard events. ++ * The number of communication channels used in the protocol. + */ -+#define SURFACE_SAM_SSH_EVENT_IMMEDIATE ((unsigned long) -1) -+ -+ -+#define SURFACE_SAM_PRIORITY_NORMAL 1 -+#define SURFACE_SAM_PRIORITY_HIGH 2 ++#define SURFACE_SAM_SSH_NUM_CHANNELS 2 + + +struct surface_sam_ssh_buf { @@ -7077,53 +10924,581 @@ index 000000000000..435b5c7bac9a + u8 tc; // target category + u8 cid; // command ID + u8 iid; // instance ID -+ u8 pri; // priority -+ u8 snc; // expect response flag -+ u8 cdl; // command data length (length of payload) ++ u8 chn; // channel ++ u8 snc; // expect response flag (bool: 0/1) ++ u16 cdl; // command data length (length of payload) + u8 *pld; // pointer to payload of length cdl +}; + ++// TODO: remove rqid on external api +struct surface_sam_ssh_event { + u16 rqid; // event type/source ID + u8 tc; // target category + u8 cid; // command ID + u8 iid; // instance ID -+ u8 pri; // priority ++ u8 chn; // channel + u8 len; // length of payload + u8 *pld; // payload of length len +}; + + -+typedef int (*surface_sam_ssh_event_handler_fn)(struct surface_sam_ssh_event *event, void *data); -+typedef unsigned long (*surface_sam_ssh_event_handler_delay)(struct surface_sam_ssh_event *event, void *data); -+ +int surface_sam_ssh_consumer_register(struct device *consumer); + ++int surface_sam_ssh_notifier_register(struct ssam_event_notifier *n); ++int surface_sam_ssh_notifier_unregister(struct ssam_event_notifier *n); ++ +int surface_sam_ssh_rqst(const struct surface_sam_ssh_rqst *rqst, struct surface_sam_ssh_buf *result); + -+int surface_sam_ssh_enable_event_source(u8 tc, u8 unknown, u16 rqid); -+int 
surface_sam_ssh_disable_event_source(u8 tc, u8 unknown, u16 rqid); -+int surface_sam_ssh_remove_event_handler(u16 rqid); ++#endif /* _SURFACE_SAM_SSH_H */ +diff --git a/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h +new file mode 100644 +index 0000000000000..801c60205128c +--- /dev/null ++++ b/drivers/platform/x86/surface_sam/surface_sam_ssh_trace.h +@@ -0,0 +1,536 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM surface_sam_ssh + -+int surface_sam_ssh_set_delayed_event_handler(u16 rqid, -+ surface_sam_ssh_event_handler_fn fn, -+ surface_sam_ssh_event_handler_delay delay, -+ void *data); ++#if !defined(_SURFACE_SAM_SSH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _SURFACE_SAM_SSH_TRACE_H + -+static inline int surface_sam_ssh_set_event_handler(u16 rqid, surface_sam_ssh_event_handler_fn fn, void *data) ++#include ++ ++#include "surface_sam_ssh.h" ++ ++ ++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_SEQ); ++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_DATA_NSQ); ++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_ACK); ++TRACE_DEFINE_ENUM(SSH_FRAME_TYPE_NAK); ++ ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING_BIT); ++ ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_FLUSH); ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_SEQUENCED); ++TRACE_DEFINE_ENUM(SSH_PACKET_TY_BLOCKING); ++ ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_LOCKED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_QUEUED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_PENDING_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTING_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_TRANSMITTED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_ACKED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_CANCELED_BIT); ++TRACE_DEFINE_ENUM(SSH_PACKET_SF_COMPLETED_BIT); ++ ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_LOCKED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_QUEUED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_PENDING_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTING_BIT); 
++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_TRANSMITTED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_RSPRCVD_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_CANCELED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_SF_COMPLETED_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_FLUSH_BIT); ++TRACE_DEFINE_ENUM(SSH_REQUEST_TY_HAS_RESPONSE_BIT); ++ ++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_SF_MASK); ++TRACE_DEFINE_ENUM(SSH_REQUEST_FLAGS_TY_MASK); ++ ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SAM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAT); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TMP); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PMC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FAN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PoM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_DBG); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KBD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_FWU); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UNI); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_LPC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SFL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KIP); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_EXT); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BLD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BAS); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SEN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SRQ); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_MCU); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_HID); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TCH); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_BKL); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_TAM); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_ACC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_UFI); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_USC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_PEN); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_VID); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_AUD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_SMC); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_KPD); ++TRACE_DEFINE_ENUM(SSAM_SSH_TC_REG); ++ ++ ++#define SSAM_PTR_UID_LEN 9 ++#define SSAM_U8_FIELD_NOT_APPLICABLE ((u16)-1) ++#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1) ++#define SSAM_RQID_NOT_APPLICABLE ((u32)-1) ++#define SSAM_SSH_TC_NOT_APPLICABLE 0 ++ ++ ++#ifndef _SURFACE_SAM_SSH_TRACE_HELPERS ++#define _SURFACE_SAM_SSH_TRACE_HELPERS ++ ++static inline void 
ssam_trace_ptr_uid(const void *ptr, char* uid_str) +{ -+ return surface_sam_ssh_set_delayed_event_handler(rqid, fn, NULL, data); ++ char buf[2 * sizeof(void*) + 1]; ++ ++ snprintf(buf, ARRAY_SIZE(buf), "%p", ptr); ++ memcpy(uid_str, &buf[ARRAY_SIZE(buf) - SSAM_PTR_UID_LEN], ++ SSAM_PTR_UID_LEN); +} + ++static inline u16 ssam_trace_get_packet_seq(const struct ssh_packet *p) ++{ ++ if (!p->data || p->data_length < SSH_MESSAGE_LENGTH(0)) ++ return SSAM_SEQ_NOT_APPLICABLE; + -+#endif /* _SURFACE_SAM_SSH_H */ ++ return p->data[SSH_MSGOFFSET_FRAME(seq)]; ++} ++ ++static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p) ++{ ++ if (!p->data || p->data_length < SSH_COMMAND_MESSAGE_LENGTH(0)) ++ return SSAM_RQID_NOT_APPLICABLE; ++ ++ return get_unaligned_le16(&p->data[SSH_MSGOFFSET_COMMAND(rqid)]); ++} ++ ++static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p) ++{ ++ if (!p->data || p->data_length < SSH_COMMAND_MESSAGE_LENGTH(0)) ++ return SSAM_SSH_TC_NOT_APPLICABLE; ++ ++ return get_unaligned_le16(&p->data[SSH_MSGOFFSET_COMMAND(tc)]); ++} ++ ++#endif /* _SURFACE_SAM_SSH_TRACE_HELPERS */ ++ ++#define ssam_trace_get_command_field_u8(packet, field) \ ++ ((!packet || packet->data_length < SSH_COMMAND_MESSAGE_LENGTH(0)) \ ++ ? 
0 : p->data[SSH_MSGOFFSET_COMMAND(field)]) ++ ++#define ssam_show_generic_u8_field(value) \ ++ __print_symbolic(value, \ ++ { SSAM_U8_FIELD_NOT_APPLICABLE, "N/A" } \ ++ ) ++ ++ ++#define ssam_show_frame_type(ty) \ ++ __print_symbolic(ty, \ ++ { SSH_FRAME_TYPE_DATA_SEQ, "DSEQ" }, \ ++ { SSH_FRAME_TYPE_DATA_NSQ, "DNSQ" }, \ ++ { SSH_FRAME_TYPE_ACK, "ACK" }, \ ++ { SSH_FRAME_TYPE_NAK, "NAK" } \ ++ ) ++ ++#define ssam_show_packet_type(type) \ ++ __print_flags(type, "", \ ++ { SSH_PACKET_TY_FLUSH, "F" }, \ ++ { SSH_PACKET_TY_SEQUENCED, "S" }, \ ++ { SSH_PACKET_TY_BLOCKING, "B" } \ ++ ) ++ ++#define ssam_show_packet_state(state) \ ++ __print_flags(state, "", \ ++ { BIT(SSH_PACKET_SF_LOCKED_BIT), "L" }, \ ++ { BIT(SSH_PACKET_SF_QUEUED_BIT), "Q" }, \ ++ { BIT(SSH_PACKET_SF_PENDING_BIT), "P" }, \ ++ { BIT(SSH_PACKET_SF_TRANSMITTING_BIT), "S" }, \ ++ { BIT(SSH_PACKET_SF_TRANSMITTED_BIT), "T" }, \ ++ { BIT(SSH_PACKET_SF_ACKED_BIT), "A" }, \ ++ { BIT(SSH_PACKET_SF_CANCELED_BIT), "C" }, \ ++ { BIT(SSH_PACKET_SF_COMPLETED_BIT), "F" } \ ++ ) ++ ++#define ssam_show_packet_seq(seq) \ ++ __print_symbolic(seq, \ ++ { SSAM_SEQ_NOT_APPLICABLE, "N/A" } \ ++ ) ++ ++ ++#define ssam_show_request_type(flags) \ ++ __print_flags(flags & SSH_REQUEST_FLAGS_TY_MASK, "", \ ++ { BIT(SSH_REQUEST_TY_FLUSH_BIT), "F" }, \ ++ { BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT), "R" } \ ++ ) ++ ++#define ssam_show_request_state(flags) \ ++ __print_flags(flags & SSH_REQUEST_FLAGS_SF_MASK, "", \ ++ { BIT(SSH_REQUEST_SF_LOCKED_BIT), "L" }, \ ++ { BIT(SSH_REQUEST_SF_QUEUED_BIT), "Q" }, \ ++ { BIT(SSH_REQUEST_SF_PENDING_BIT), "P" }, \ ++ { BIT(SSH_REQUEST_SF_TRANSMITTING_BIT), "S" }, \ ++ { BIT(SSH_REQUEST_SF_TRANSMITTED_BIT), "T" }, \ ++ { BIT(SSH_REQUEST_SF_RSPRCVD_BIT), "A" }, \ ++ { BIT(SSH_REQUEST_SF_CANCELED_BIT), "C" }, \ ++ { BIT(SSH_REQUEST_SF_COMPLETED_BIT), "F" } \ ++ ) ++ ++#define ssam_show_request_id(rqid) \ ++ __print_symbolic(rqid, \ ++ { SSAM_RQID_NOT_APPLICABLE, "N/A" } \ ++ ) ++ ++#define 
ssam_show_ssh_tc(rqid) \ ++ __print_symbolic(rqid, \ ++ { SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \ ++ { SSAM_SSH_TC_SAM, "SAM" }, \ ++ { SSAM_SSH_TC_BAT, "BAT" }, \ ++ { SSAM_SSH_TC_TMP, "TMP" }, \ ++ { SSAM_SSH_TC_PMC, "PMC" }, \ ++ { SSAM_SSH_TC_FAN, "FAN" }, \ ++ { SSAM_SSH_TC_PoM, "PoM" }, \ ++ { SSAM_SSH_TC_DBG, "DBG" }, \ ++ { SSAM_SSH_TC_KBD, "KBD" }, \ ++ { SSAM_SSH_TC_FWU, "FWU" }, \ ++ { SSAM_SSH_TC_UNI, "UNI" }, \ ++ { SSAM_SSH_TC_LPC, "LPC" }, \ ++ { SSAM_SSH_TC_TCL, "TCL" }, \ ++ { SSAM_SSH_TC_SFL, "SFL" }, \ ++ { SSAM_SSH_TC_KIP, "KIP" }, \ ++ { SSAM_SSH_TC_EXT, "EXT" }, \ ++ { SSAM_SSH_TC_BLD, "BLD" }, \ ++ { SSAM_SSH_TC_BAS, "BAS" }, \ ++ { SSAM_SSH_TC_SEN, "SEN" }, \ ++ { SSAM_SSH_TC_SRQ, "SRQ" }, \ ++ { SSAM_SSH_TC_MCU, "MCU" }, \ ++ { SSAM_SSH_TC_HID, "HID" }, \ ++ { SSAM_SSH_TC_TCH, "TCH" }, \ ++ { SSAM_SSH_TC_BKL, "BKL" }, \ ++ { SSAM_SSH_TC_TAM, "TAM" }, \ ++ { SSAM_SSH_TC_ACC, "ACC" }, \ ++ { SSAM_SSH_TC_UFI, "UFI" }, \ ++ { SSAM_SSH_TC_USC, "USC" }, \ ++ { SSAM_SSH_TC_PEN, "PEN" }, \ ++ { SSAM_SSH_TC_VID, "VID" }, \ ++ { SSAM_SSH_TC_AUD, "AUD" }, \ ++ { SSAM_SSH_TC_SMC, "SMC" }, \ ++ { SSAM_SSH_TC_KPD, "KPD" }, \ ++ { SSAM_SSH_TC_REG, "REG" } \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_frame_class, ++ TP_PROTO(const struct ssh_frame *frame), ++ ++ TP_ARGS(frame), ++ ++ TP_STRUCT__entry( ++ __field(u8, type) ++ __field(u8, seq) ++ __field(u16, len) ++ ), ++ ++ TP_fast_assign( ++ __entry->type = frame->type; ++ __entry->seq = frame->seq; ++ __entry->len = get_unaligned_le16(&frame->len); ++ ), ++ ++ TP_printk("ty=%s, seq=0x%02x, len=%u", ++ ssam_show_frame_type(__entry->type), ++ __entry->seq, ++ __entry->len ++ ) ++); ++ ++#define DEFINE_SSAM_FRAME_EVENT(name) \ ++ DEFINE_EVENT(ssam_frame_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_frame *frame), \ ++ TP_ARGS(frame) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_command_class, ++ TP_PROTO(const struct ssh_command *cmd, u16 len), ++ ++ TP_ARGS(cmd, len), ++ ++ TP_STRUCT__entry( ++ __field(u16, rqid) 
++ __field(u16, len) ++ __field(u8, tc) ++ __field(u8, cid) ++ __field(u8, iid) ++ ), ++ ++ TP_fast_assign( ++ __entry->rqid = get_unaligned_le16(&cmd->rqid); ++ __entry->tc = cmd->tc; ++ __entry->cid = cmd->cid; ++ __entry->iid = cmd->iid; ++ __entry->len = len; ++ ), ++ ++ TP_printk("rqid=0x%04x, tc=%s, cid=0x%02x, iid=0x%02x, len=%u", ++ __entry->rqid, ++ ssam_show_ssh_tc(__entry->tc), ++ __entry->cid, ++ __entry->iid, ++ __entry->len ++ ) ++); ++ ++#define DEFINE_SSAM_COMMAND_EVENT(name) \ ++ DEFINE_EVENT(ssam_command_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_command *cmd, u16 len), \ ++ TP_ARGS(cmd, len) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_packet_class, ++ TP_PROTO(const struct ssh_packet *packet), ++ ++ TP_ARGS(packet), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(u8, type) ++ __field(u8, priority) ++ __field(u16, length) ++ __field(unsigned long, state) ++ __field(u16, seq) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(packet, __entry->uid); ++ __entry->type = packet->type; ++ __entry->priority = READ_ONCE(packet->priority); ++ __entry->length = packet->data_length; ++ __entry->state = READ_ONCE(packet->state); ++ __entry->seq = ssam_trace_get_packet_seq(packet); ++ ), ++ ++ TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s", ++ __entry->uid, ++ ssam_show_packet_seq(__entry->seq), ++ ssam_show_packet_type(__entry->type), ++ __entry->priority, ++ __entry->length, ++ ssam_show_packet_state(__entry->state) ++ ) ++); ++ ++#define DEFINE_SSAM_PACKET_EVENT(name) \ ++ DEFINE_EVENT(ssam_packet_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_packet *packet), \ ++ TP_ARGS(packet) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_packet_status_class, ++ TP_PROTO(const struct ssh_packet *packet, int status), ++ ++ TP_ARGS(packet, status), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(u8, type) ++ __field(u8, priority) ++ __field(u16, length) ++ __field(unsigned long, state) ++ __field(u16, 
seq) ++ __field(int, status) ++ ), ++ ++ TP_fast_assign( ++ ssam_trace_ptr_uid(packet, __entry->uid); ++ __entry->type = packet->type; ++ __entry->priority = READ_ONCE(packet->priority); ++ __entry->length = packet->data_length; ++ __entry->state = READ_ONCE(packet->state); ++ __entry->seq = ssam_trace_get_packet_seq(packet); ++ __entry->status = status; ++ ), ++ ++ TP_printk("uid=%s, seq=%s, ty=%s, pri=0x%02x, len=%u, sta=%s, status=%d", ++ __entry->uid, ++ ssam_show_packet_seq(__entry->seq), ++ ssam_show_packet_type(__entry->type), ++ __entry->priority, ++ __entry->length, ++ ssam_show_packet_state(__entry->state), ++ __entry->status ++ ) ++); ++ ++#define DEFINE_SSAM_PACKET_STATUS_EVENT(name) \ ++ DEFINE_EVENT(ssam_packet_status_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_packet *packet, int status), \ ++ TP_ARGS(packet, status) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_request_class, ++ TP_PROTO(const struct ssh_request *request), ++ ++ TP_ARGS(request), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(unsigned long, state) ++ __field(u32, rqid) ++ __field(u8, tc) ++ __field(u16, cid) ++ __field(u16, iid) ++ ), ++ ++ TP_fast_assign( ++ const struct ssh_packet *p = &request->packet; ++ ++ // use packet for UID so we can match requests to packets ++ ssam_trace_ptr_uid(p, __entry->uid); ++ __entry->state = READ_ONCE(request->state); ++ __entry->rqid = ssam_trace_get_request_id(p); ++ __entry->tc = ssam_trace_get_request_tc(p); ++ __entry->cid = ssam_trace_get_command_field_u8(p, cid); ++ __entry->iid = ssam_trace_get_command_field_u8(p, iid); ++ ), ++ ++ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s", ++ __entry->uid, ++ ssam_show_request_id(__entry->rqid), ++ ssam_show_request_type(__entry->state), ++ ssam_show_request_state(__entry->state), ++ ssam_show_ssh_tc(__entry->tc), ++ ssam_show_generic_u8_field(__entry->cid), ++ ssam_show_generic_u8_field(__entry->iid) ++ ) ++); ++ ++#define 
DEFINE_SSAM_REQUEST_EVENT(name) \ ++ DEFINE_EVENT(ssam_request_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_request *request), \ ++ TP_ARGS(request) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_request_status_class, ++ TP_PROTO(const struct ssh_request *request, int status), ++ ++ TP_ARGS(request, status), ++ ++ TP_STRUCT__entry( ++ __array(char, uid, SSAM_PTR_UID_LEN) ++ __field(unsigned long, state) ++ __field(u32, rqid) ++ __field(u8, tc) ++ __field(u16, cid) ++ __field(u16, iid) ++ __field(int, status) ++ ), ++ ++ TP_fast_assign( ++ const struct ssh_packet *p = &request->packet; ++ ++ // use packet for UID so we can match requests to packets ++ ssam_trace_ptr_uid(p, __entry->uid); ++ __entry->state = READ_ONCE(request->state); ++ __entry->rqid = ssam_trace_get_request_id(p); ++ __entry->tc = ssam_trace_get_request_tc(p); ++ __entry->cid = ssam_trace_get_command_field_u8(p, cid); ++ __entry->iid = ssam_trace_get_command_field_u8(p, iid); ++ __entry->status = status; ++ ), ++ ++ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d", ++ __entry->uid, ++ ssam_show_request_id(__entry->rqid), ++ ssam_show_request_type(__entry->state), ++ ssam_show_request_state(__entry->state), ++ ssam_show_ssh_tc(__entry->tc), ++ ssam_show_generic_u8_field(__entry->cid), ++ ssam_show_generic_u8_field(__entry->iid), ++ __entry->status ++ ) ++); ++ ++#define DEFINE_SSAM_REQUEST_STATUS_EVENT(name) \ ++ DEFINE_EVENT(ssam_request_status_class, ssam_##name, \ ++ TP_PROTO(const struct ssh_request *request, int status),\ ++ TP_ARGS(request, status) \ ++ ) ++ ++ ++DECLARE_EVENT_CLASS(ssam_generic_uint_class, ++ TP_PROTO(const char* property, unsigned int value), ++ ++ TP_ARGS(property, value), ++ ++ TP_STRUCT__entry( ++ __string(property, property) ++ __field(unsigned int, value) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(property, property); ++ __entry->value = value; ++ ), ++ ++ TP_printk("%s=%u", __get_str(property), __entry->value) ++); ++ ++#define 
DEFINE_SSAM_GENERIC_UINT_EVENT(name) \ ++ DEFINE_EVENT(ssam_generic_uint_class, ssam_##name, \ ++ TP_PROTO(const char* property, unsigned int value), \ ++ TP_ARGS(property, value) \ ++ ) ++ ++ ++DEFINE_SSAM_FRAME_EVENT(rx_frame_received); ++DEFINE_SSAM_COMMAND_EVENT(rx_response_received); ++DEFINE_SSAM_COMMAND_EVENT(rx_event_received); ++ ++DEFINE_SSAM_PACKET_EVENT(packet_release); ++DEFINE_SSAM_PACKET_EVENT(packet_submit); ++DEFINE_SSAM_PACKET_EVENT(packet_resubmit); ++DEFINE_SSAM_PACKET_EVENT(packet_timeout); ++DEFINE_SSAM_PACKET_EVENT(packet_cancel); ++DEFINE_SSAM_PACKET_STATUS_EVENT(packet_complete); ++DEFINE_SSAM_GENERIC_UINT_EVENT(ptl_timeout_reap); ++ ++DEFINE_SSAM_REQUEST_EVENT(request_submit); ++DEFINE_SSAM_REQUEST_EVENT(request_timeout); ++DEFINE_SSAM_REQUEST_EVENT(request_cancel); ++DEFINE_SSAM_REQUEST_STATUS_EVENT(request_complete); ++DEFINE_SSAM_GENERIC_UINT_EVENT(rtl_timeout_reap); ++ ++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_ack_packet); ++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_nak_packet); ++DEFINE_SSAM_PACKET_EVENT(ei_tx_drop_dsq_packet); ++DEFINE_SSAM_PACKET_STATUS_EVENT(ei_tx_fail_write); ++DEFINE_SSAM_PACKET_EVENT(ei_tx_corrupt_data); ++DEFINE_SSAM_GENERIC_UINT_EVENT(ei_rx_corrupt_syn); ++DEFINE_SSAM_FRAME_EVENT(ei_rx_corrupt_data); ++DEFINE_SSAM_REQUEST_EVENT(ei_rx_drop_response); ++ ++#endif /* _SURFACE_SAM_SSH_TRACE_H */ ++ ++/* This part must be outside protection */ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . 
++#define TRACE_INCLUDE_FILE surface_sam_ssh_trace ++ ++#include diff --git a/drivers/platform/x86/surface_sam/surface_sam_vhf.c b/drivers/platform/x86/surface_sam/surface_sam_vhf.c new file mode 100644 -index 000000000000..a00763805eca +index 0000000000000..984035c55d63a --- /dev/null +++ b/drivers/platform/x86/surface_sam/surface_sam_vhf.c -@@ -0,0 +1,270 @@ -+// SPDX-License-Identifier: GPL-2.0 +@@ -0,0 +1,261 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Virtual HID Framework (VHF) driver for input events via SAM. + * Used for keyboard input events on the Surface Laptops. @@ -7143,21 +11518,11 @@ index 000000000000..a00763805eca + +#define VHF_INPUT_NAME "Microsoft Virtual HID Framework Device" + -+/* -+ * Request ID for VHF events. This value is based on the output of the Surface -+ * EC and should not be changed. -+ */ -+#define SAM_EVENT_VHF_RQID 0x0001 -+#define SAM_EVENT_VHF_TC 0x08 -+ -+ -+struct vhf_evtctx { -+ struct device *dev; -+ struct hid_device *hid; -+}; + +struct vhf_drvdata { -+ struct vhf_evtctx event_ctx; ++ struct platform_device *dev; ++ struct hid_device *hid; ++ struct ssam_event_notifier notif; +}; + + @@ -7294,14 +11659,19 @@ index 000000000000..a00763805eca + return hid; +} + -+static int vhf_event_handler(struct surface_sam_ssh_event *event, void *data) ++static u32 vhf_event_handler(struct ssam_notifier_block *nb, const struct ssam_event *event) +{ -+ struct vhf_evtctx *ctx = (struct vhf_evtctx *)data; ++ struct vhf_drvdata *drvdata = container_of(nb, struct vhf_drvdata, notif.base); ++ int status; + -+ if (event->tc == 0x08 && (event->cid == 0x03 || event->cid == 0x04)) -+ return hid_input_report(ctx->hid, HID_INPUT_REPORT, event->pld, event->len, 1); ++ if (event->target_category != 0x08) ++ return 0; ++ ++ if (event->command_id == 0x03 || event->command_id == 0x04) { ++ status = hid_input_report(drvdata->hid, HID_INPUT_REPORT, (u8 *)&event->data[0], event->length, 1); ++ return ssam_notifier_from_errno(status) | 
SSAM_NOTIF_HANDLED; ++ } + -+ dev_warn(ctx->dev, "unsupported event (tc = %d, cid = %d)\n", event->tc, event->cid); + return 0; +} + @@ -7330,26 +11700,24 @@ index 000000000000..a00763805eca + if (status) + goto err_add_hid; + -+ drvdata->event_ctx.dev = &pdev->dev; -+ drvdata->event_ctx.hid = hid; ++ drvdata->dev = pdev; ++ drvdata->hid = hid; ++ ++ drvdata->notif.base.priority = 1; ++ drvdata->notif.base.fn = vhf_event_handler; ++ drvdata->notif.event.reg = SSAM_EVENT_REGISTRY_SAM; ++ drvdata->notif.event.id.target_category = SSAM_SSH_TC_KBD; ++ drvdata->notif.event.id.instance = 0; ++ drvdata->notif.event.flags = 0; + + platform_set_drvdata(pdev, drvdata); + -+ status = surface_sam_ssh_set_event_handler( -+ SAM_EVENT_VHF_RQID, -+ vhf_event_handler, -+ &drvdata->event_ctx); ++ status = surface_sam_ssh_notifier_register(&drvdata->notif); + if (status) + goto err_add_hid; + -+ status = surface_sam_ssh_enable_event_source(SAM_EVENT_VHF_TC, 0x01, SAM_EVENT_VHF_RQID); -+ if (status) -+ goto err_event_source; -+ + return 0; + -+err_event_source: -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_VHF_RQID); +err_add_hid: + hid_destroy_device(hid); + platform_set_drvdata(pdev, NULL); @@ -7362,10 +11730,8 @@ index 000000000000..a00763805eca +{ + struct vhf_drvdata *drvdata = platform_get_drvdata(pdev); + -+ surface_sam_ssh_disable_event_source(SAM_EVENT_VHF_TC, 0x01, SAM_EVENT_VHF_RQID); -+ surface_sam_ssh_remove_event_handler(SAM_EVENT_VHF_RQID); -+ -+ hid_destroy_device(drvdata->event_ctx.hid); ++ surface_sam_ssh_notifier_unregister(&drvdata->notif); ++ hid_destroy_device(drvdata->hid); + kfree(drvdata); + + platform_set_drvdata(pdev, NULL); @@ -7384,7 +11750,7 @@ index 000000000000..a00763805eca + .remove = surface_sam_vhf_remove, + .driver = { + .name = "surface_sam_vhf", -+ .acpi_match_table = ACPI_PTR(surface_sam_vhf_match), ++ .acpi_match_table = surface_sam_vhf_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; @@ -7392,9 +11758,9 @@ index 
000000000000..a00763805eca + +MODULE_AUTHOR("Maximilian Luz "); +MODULE_DESCRIPTION("Virtual HID Framework Driver for 5th Generation Surface Devices"); -+MODULE_LICENSE("GPL v2"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c -index c66a04d24f1d..6b48fdfb2005 100644 +index c66a04d24f1d3..6b48fdfb20059 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -496,16 +496,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl) @@ -7548,5 +11914,5 @@ index c66a04d24f1d..6b48fdfb2005 100644 if (!ctrl->serdev) return -ENODEV; -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0006-suspend.patch b/patches/4.19/0006-suspend.patch index a03b89c24..df5e36a48 100644 --- a/patches/4.19/0006-suspend.patch +++ b/patches/4.19/0006-suspend.patch @@ -1,7 +1,7 @@ -From 9fbb880aaed6b854547a6019cdfd2e78013f8fbb Mon Sep 17 00:00:00 2001 +From e853cd24e0d7909a3ae08521cc103a5f66c1df91 Mon Sep 17 00:00:00 2001 From: kitakar5525 <34676735+kitakar5525@users.noreply.github.com> Date: Sat, 28 Sep 2019 17:48:21 +0200 -Subject: [PATCH 06/10] suspend +Subject: [PATCH 6/8] suspend --- drivers/nvme/host/core.c | 36 ++++++++++++-- @@ -12,10 +12,10 @@ Subject: [PATCH 06/10] suspend 5 files changed, 162 insertions(+), 6 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c -index d5359c7c811a..04e1568b145a 100644 +index 0d60f2f8f3eec..6dcd37c10153f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c -@@ -1055,15 +1055,15 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, +@@ -1068,15 +1068,15 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, return id; } @@ -34,7 +34,7 @@ index d5359c7c811a..04e1568b145a 100644 c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); -@@ -1074,6 +1074,24 @@ static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword +@@ -1087,6 +1087,24 @@ static int 
nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword return ret; } @@ -59,7 +59,7 @@ index d5359c7c811a..04e1568b145a 100644 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) { u32 q_count = (*count - 1) | ((*count - 1) << 16); -@@ -3601,6 +3619,18 @@ static void nvme_free_ctrl(struct device *dev) +@@ -3613,6 +3631,18 @@ static void nvme_free_ctrl(struct device *dev) nvme_put_subsystem(subsys); } @@ -79,7 +79,7 @@ index d5359c7c811a..04e1568b145a 100644 * Initialize a NVMe controller structures. This needs to be called during * earliest initialization so that we have the initialized structured around diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h -index cc4273f11989..40192b661798 100644 +index cc4273f119894..40192b6617983 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -436,6 +436,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, @@ -104,7 +104,7 @@ index cc4273f11989..40192b661798 100644 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c -index 3c68a5b35ec1..0cc7bea4eb70 100644 +index 3c68a5b35ec1b..0cc7bea4eb707 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -26,6 +26,7 @@ @@ -266,10 +266,10 @@ index 3c68a5b35ec1..0cc7bea4eb70 100644 .err_handler = &nvme_err_handler, }; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c -index db2efa219028..0d1ef41abea0 100644 +index 6e50f84733b75..b03884b6bc6f8 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c -@@ -1180,6 +1180,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) +@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy, NULL, 0644); @@ -297,7 +297,7 @@ index db2efa219028..0d1ef41abea0 100644 static ssize_t 
link_state_show(struct device *dev, struct device_attribute *attr, diff --git a/include/linux/pci.h b/include/linux/pci.h -index b1f297f4b7b0..94ab2fc800d3 100644 +index b1f297f4b7b0b..94ab2fc800d30 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1486,8 +1486,10 @@ extern bool pcie_ports_native; @@ -312,5 +312,5 @@ index b1f297f4b7b0..94ab2fc800d3 100644 #ifdef CONFIG_PCIEAER -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0007-ipts.patch b/patches/4.19/0007-ipts.patch index a84765738..5ca870c9c 100644 --- a/patches/4.19/0007-ipts.patch +++ b/patches/4.19/0007-ipts.patch @@ -1,7 +1,7 @@ -From ec6ee78e1b8700139edd50e62e3815d12e5a889e Mon Sep 17 00:00:00 2001 +From 1be5cc4b139b8ec070d12b786fad6fba41f4c34a Mon Sep 17 00:00:00 2001 From: Maximilian Luz Date: Sat, 28 Sep 2019 17:58:17 +0200 -Subject: [PATCH 07/10] ipts +Subject: [PATCH 7/8] ipts --- drivers/gpu/drm/i915/Makefile | 3 + @@ -89,7 +89,7 @@ Subject: [PATCH 07/10] ipts create mode 100644 include/linux/ipts.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile -index 5794f102f9b8..6ae0e91a213a 100644 +index 5794f102f9b8f..6ae0e91a213af 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -155,6 +155,9 @@ i915-y += dvo_ch7017.o \ @@ -103,7 +103,7 @@ index 5794f102f9b8..6ae0e91a213a 100644 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c -index e063e98d1e82..99becb6aed68 100644 +index e063e98d1e82e..99becb6aed688 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -31,6 +31,7 @@ @@ -191,7 +191,7 @@ index e063e98d1e82..99becb6aed68 100644 int i915_debugfs_register(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c -index b0d76a7a0946..81fba8e5ab05 100644 +index b0d76a7a0946f..81fba8e5ab050 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -47,11 +47,12 @@ @@ -229,7 +229,7 @@ index b0d76a7a0946..81fba8e5ab05 100644 if (i915_gem_suspend(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index db2e9af49ae6..99bc0c92c411 100644 +index db2e9af49ae6f..99bc0c92c4111 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3232,6 +3232,9 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, @@ -243,7 +243,7 @@ index db2e9af49ae6..99bc0c92c411 100644 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) { diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c -index ef383fd42988..89da4ff09431 100644 +index ef383fd429885..89da4ff094312 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -472,6 +472,18 @@ static bool needs_preempt_context(struct drm_i915_private *i915) @@ -266,7 +266,7 @@ index ef383fd42988..89da4ff09431 100644 { struct i915_gem_context *ctx; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c -index 29877969310d..37a58b19ec3f 100644 +index b7c3982321369..adf168aed2fe3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -36,6 +36,7 @@ @@ -287,7 +287,7 @@ index 29877969310d..37a58b19ec3f 100644 if (tasklet) tasklet_hi_schedule(&engine->execlists.tasklet); } -@@ -4122,7 +4126,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) +@@ -4123,7 +4127,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) { /* These are interrupts we'll toggle with the ring mask register */ uint32_t gt_interrupts[] = { @@ -298,7 +298,7 @@ index 29877969310d..37a58b19ec3f 100644 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, diff --git a/drivers/gpu/drm/i915/i915_params.c 
b/drivers/gpu/drm/i915/i915_params.c -index 295e981e4a39..84415814c007 100644 +index 295e981e4a398..84415814c0070 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -145,7 +145,10 @@ i915_param_named_unsafe(edp_vswing, int, 0400, @@ -314,7 +314,7 @@ index 295e981e4a39..84415814c007 100644 i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level. Requires GuC to be loaded. " diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h -index 6c4d4a21474b..4ab800c3de6d 100644 +index 6c4d4a21474b5..4ab800c3de6d0 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -46,7 +46,7 @@ struct drm_printer; @@ -337,7 +337,7 @@ index 6c4d4a21474b..4ab800c3de6d 100644 #define MEMBER(T, member, ...) T member; struct i915_params { diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h -index 4121928a495e..8967376accf3 100644 +index 4121928a495e0..8967376accf30 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -69,6 +69,7 @@ struct intel_guc { @@ -349,7 +349,7 @@ index 4121928a495e..8967376accf3 100644 struct guc_preempt_work preempt_work[I915_NUM_ENGINES]; struct workqueue_struct *preempt_wq; diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c -index 4aa5e6463e7b..da80c5f17fee 100644 +index 4aa5e6463e7b7..da80c5f17feea 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -88,12 +88,17 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) @@ -481,7 +481,7 @@ index 4aa5e6463e7b..da80c5f17fee 100644 #include "selftests/intel_guc.c" #endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h -index fb081cefef93..71fc7986585a 100644 +index fb081cefef935..71fc7986585ab 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ 
b/drivers/gpu/drm/i915/intel_guc_submission.h @@ -79,5 +79,9 @@ void intel_guc_submission_disable(struct intel_guc *guc); @@ -496,7 +496,7 @@ index fb081cefef93..71fc7986585a 100644 #endif diff --git a/drivers/gpu/drm/i915/intel_ipts.c b/drivers/gpu/drm/i915/intel_ipts.c new file mode 100644 -index 000000000000..c1199074924a +index 0000000000000..c1199074924a0 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ipts.c @@ -0,0 +1,650 @@ @@ -1152,7 +1152,7 @@ index 000000000000..c1199074924a +} diff --git a/drivers/gpu/drm/i915/intel_ipts.h b/drivers/gpu/drm/i915/intel_ipts.h new file mode 100644 -index 000000000000..67f90b72f237 +index 0000000000000..67f90b72f2378 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_ipts.h @@ -0,0 +1,34 @@ @@ -1191,7 +1191,7 @@ index 000000000000..67f90b72f237 + +#endif //_INTEL_IPTS_H_ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c -index 13e97faabaa7..a4af67d3d6ff 100644 +index 13e97faabaa74..a4af67d3d6ffd 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -164,9 +164,6 @@ @@ -1242,7 +1242,7 @@ index 13e97faabaa7..a4af67d3d6ff 100644 struct intel_context *ce) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h -index 4dfb78e3ec7e..32159231a16e 100644 +index 4dfb78e3ec7e4..32159231a16e7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -106,4 +106,12 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv); @@ -1259,7 +1259,7 @@ index 4dfb78e3ec7e..32159231a16e 100644 + #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c -index 4a9f139e7b73..c137a57f6702 100644 +index 4a9f139e7b738..c137a57f67026 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -34,6 +34,7 @@ @@ -1291,7 +1291,7 @@ index 4a9f139e7b73..c137a57f6702 100644 static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig -index 3726eacdf65d..77263b5f5915 100644 +index 3726eacdf65de..77263b5f5915a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -520,6 +520,7 @@ source "drivers/misc/ti-st/Kconfig" @@ -1303,7 +1303,7 @@ index 3726eacdf65d..77263b5f5915 100644 source "drivers/misc/mic/Kconfig" source "drivers/misc/genwqe/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile -index af22bbc3d00c..eb1eb0d58c32 100644 +index af22bbc3d00cb..eb1eb0d58c327 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -44,6 +44,7 @@ obj-y += lis3lv02d/ @@ -1316,7 +1316,7 @@ index af22bbc3d00c..eb1eb0d58c32 100644 obj-$(CONFIG_SRAM) += sram.o diff --git a/drivers/misc/ipts/Kconfig b/drivers/misc/ipts/Kconfig new file mode 100644 -index 000000000000..900d2c58ca74 +index 0000000000000..900d2c58ca74c --- /dev/null +++ b/drivers/misc/ipts/Kconfig @@ -0,0 +1,12 @@ @@ -1334,7 +1334,7 @@ index 000000000000..900d2c58ca74 +source "drivers/misc/ipts/companion/Kconfig" diff --git a/drivers/misc/ipts/Makefile b/drivers/misc/ipts/Makefile new file mode 100644 -index 000000000000..bb3982f48afc +index 0000000000000..bb3982f48afcb --- /dev/null +++ b/drivers/misc/ipts/Makefile @@ -0,0 +1,19 @@ @@ -1359,7 +1359,7 @@ index 000000000000..bb3982f48afc +obj-y += companion/ diff --git a/drivers/misc/ipts/companion.c b/drivers/misc/ipts/companion.c new file mode 100644 -index 000000000000..8f66b852f137 +index 0000000000000..8f66b852f1371 --- /dev/null +++ b/drivers/misc/ipts/companion.c @@ -0,0 +1,211 @@ @@ -1576,7 +1576,7 @@ index 000000000000..8f66b852f137 +} diff --git a/drivers/misc/ipts/companion.h b/drivers/misc/ipts/companion.h new file mode 100644 -index 000000000000..7a1e4b388c40 +index 0000000000000..7a1e4b388c40a --- /dev/null +++ b/drivers/misc/ipts/companion.h @@ -0,0 +1,25 @@ @@ -1607,7 +1607,7 @@ index 000000000000..7a1e4b388c40 +#endif // _IPTS_COMPANION_H_ diff --git a/drivers/misc/ipts/companion/Kconfig 
b/drivers/misc/ipts/companion/Kconfig new file mode 100644 -index 000000000000..ef17d9bb5242 +index 0000000000000..ef17d9bb5242f --- /dev/null +++ b/drivers/misc/ipts/companion/Kconfig @@ -0,0 +1,8 @@ @@ -1621,7 +1621,7 @@ index 000000000000..ef17d9bb5242 + If you have a Microsoft Surface using IPTS, select y or m here. diff --git a/drivers/misc/ipts/companion/Makefile b/drivers/misc/ipts/companion/Makefile new file mode 100644 -index 000000000000..b37f2f59937a +index 0000000000000..b37f2f59937a8 --- /dev/null +++ b/drivers/misc/ipts/companion/Makefile @@ -0,0 +1,2 @@ @@ -1629,7 +1629,7 @@ index 000000000000..b37f2f59937a +obj-$(CONFIG_INTEL_IPTS_SURFACE)+= ipts-surface.o diff --git a/drivers/misc/ipts/companion/ipts-surface.c b/drivers/misc/ipts/companion/ipts-surface.c new file mode 100644 -index 000000000000..a717dfcdfeba +index 0000000000000..a717dfcdfeba7 --- /dev/null +++ b/drivers/misc/ipts/companion/ipts-surface.c @@ -0,0 +1,157 @@ @@ -1792,7 +1792,7 @@ index 000000000000..a717dfcdfeba +IPTS_SURFACE_FIRMWARE("MSHW0137"); diff --git a/drivers/misc/ipts/dbgfs.c b/drivers/misc/ipts/dbgfs.c new file mode 100644 -index 000000000000..fd9388de17e7 +index 0000000000000..fd9388de17e78 --- /dev/null +++ b/drivers/misc/ipts/dbgfs.c @@ -0,0 +1,277 @@ @@ -2075,7 +2075,7 @@ index 000000000000..fd9388de17e7 +} diff --git a/drivers/misc/ipts/gfx.c b/drivers/misc/ipts/gfx.c new file mode 100644 -index 000000000000..b8900f514c75 +index 0000000000000..b8900f514c756 --- /dev/null +++ b/drivers/misc/ipts/gfx.c @@ -0,0 +1,180 @@ @@ -2261,7 +2261,7 @@ index 000000000000..b8900f514c75 +} diff --git a/drivers/misc/ipts/gfx.h b/drivers/misc/ipts/gfx.h new file mode 100644 -index 000000000000..2880e122e9f9 +index 0000000000000..2880e122e9f96 --- /dev/null +++ b/drivers/misc/ipts/gfx.h @@ -0,0 +1,25 @@ @@ -2292,7 +2292,7 @@ index 000000000000..2880e122e9f9 +#endif // _IPTS_GFX_H_ diff --git a/drivers/misc/ipts/hid.c b/drivers/misc/ipts/hid.c new file mode 100644 -index 
000000000000..1b7ad2a774a8 +index 0000000000000..1b7ad2a774a86 --- /dev/null +++ b/drivers/misc/ipts/hid.c @@ -0,0 +1,469 @@ @@ -2767,7 +2767,7 @@ index 000000000000..1b7ad2a774a8 +} diff --git a/drivers/misc/ipts/hid.h b/drivers/misc/ipts/hid.h new file mode 100644 -index 000000000000..c943979e0198 +index 0000000000000..c943979e01983 --- /dev/null +++ b/drivers/misc/ipts/hid.h @@ -0,0 +1,21 @@ @@ -2794,7 +2794,7 @@ index 000000000000..c943979e0198 +#endif // _IPTS_HID_H_ diff --git a/drivers/misc/ipts/ipts.c b/drivers/misc/ipts/ipts.c new file mode 100644 -index 000000000000..dfafabf8dd94 +index 0000000000000..dfafabf8dd949 --- /dev/null +++ b/drivers/misc/ipts/ipts.c @@ -0,0 +1,62 @@ @@ -2862,7 +2862,7 @@ index 000000000000..dfafabf8dd94 +} diff --git a/drivers/misc/ipts/ipts.h b/drivers/misc/ipts/ipts.h new file mode 100644 -index 000000000000..32eb3ffd68a3 +index 0000000000000..32eb3ffd68a3b --- /dev/null +++ b/drivers/misc/ipts/ipts.h @@ -0,0 +1,172 @@ @@ -3040,7 +3040,7 @@ index 000000000000..32eb3ffd68a3 +#endif // _IPTS_H_ diff --git a/drivers/misc/ipts/kernel.c b/drivers/misc/ipts/kernel.c new file mode 100644 -index 000000000000..a2c43228e2c7 +index 0000000000000..a2c43228e2c7d --- /dev/null +++ b/drivers/misc/ipts/kernel.c @@ -0,0 +1,1047 @@ @@ -4093,7 +4093,7 @@ index 000000000000..a2c43228e2c7 +} diff --git a/drivers/misc/ipts/kernel.h b/drivers/misc/ipts/kernel.h new file mode 100644 -index 000000000000..7be45da01cfc +index 0000000000000..7be45da01cfc0 --- /dev/null +++ b/drivers/misc/ipts/kernel.h @@ -0,0 +1,17 @@ @@ -4116,7 +4116,7 @@ index 000000000000..7be45da01cfc +#endif // _IPTS_KERNEL_H_ diff --git a/drivers/misc/ipts/mei-msgs.h b/drivers/misc/ipts/mei-msgs.h new file mode 100644 -index 000000000000..036b74f7234e +index 0000000000000..036b74f7234ef --- /dev/null +++ b/drivers/misc/ipts/mei-msgs.h @@ -0,0 +1,901 @@ @@ -5023,7 +5023,7 @@ index 000000000000..036b74f7234e +#endif // _IPTS_MEI_MSGS_H_ diff --git a/drivers/misc/ipts/mei.c 
b/drivers/misc/ipts/mei.c new file mode 100644 -index 000000000000..03b5d747a728 +index 0000000000000..03b5d747a728f --- /dev/null +++ b/drivers/misc/ipts/mei.c @@ -0,0 +1,238 @@ @@ -5267,7 +5267,7 @@ index 000000000000..03b5d747a728 +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ipts/msg-handler.c b/drivers/misc/ipts/msg-handler.c new file mode 100644 -index 000000000000..9431b1dfc6e0 +index 0000000000000..9431b1dfc6e06 --- /dev/null +++ b/drivers/misc/ipts/msg-handler.c @@ -0,0 +1,405 @@ @@ -5678,7 +5678,7 @@ index 000000000000..9431b1dfc6e0 +} diff --git a/drivers/misc/ipts/msg-handler.h b/drivers/misc/ipts/msg-handler.h new file mode 100644 -index 000000000000..eca4238adf4b +index 0000000000000..eca4238adf4b1 --- /dev/null +++ b/drivers/misc/ipts/msg-handler.h @@ -0,0 +1,28 @@ @@ -5712,7 +5712,7 @@ index 000000000000..eca4238adf4b +#endif /* _IPTS_MSG_HANDLER_H */ diff --git a/drivers/misc/ipts/params.c b/drivers/misc/ipts/params.c new file mode 100644 -index 000000000000..3ea76ca8342a +index 0000000000000..3ea76ca8342a9 --- /dev/null +++ b/drivers/misc/ipts/params.c @@ -0,0 +1,42 @@ @@ -5760,7 +5760,7 @@ index 000000000000..3ea76ca8342a + diff --git a/drivers/misc/ipts/params.h b/drivers/misc/ipts/params.h new file mode 100644 -index 000000000000..c20546bacb08 +index 0000000000000..c20546bacb086 --- /dev/null +++ b/drivers/misc/ipts/params.h @@ -0,0 +1,25 @@ @@ -5791,7 +5791,7 @@ index 000000000000..c20546bacb08 +#endif // _IPTS_PARAMS_H_ diff --git a/drivers/misc/ipts/resource.c b/drivers/misc/ipts/resource.c new file mode 100644 -index 000000000000..cfd212f2cac0 +index 0000000000000..cfd212f2cac09 --- /dev/null +++ b/drivers/misc/ipts/resource.c @@ -0,0 +1,291 @@ @@ -6088,7 +6088,7 @@ index 000000000000..cfd212f2cac0 +} diff --git a/drivers/misc/ipts/resource.h b/drivers/misc/ipts/resource.h new file mode 100644 -index 000000000000..27b9c17fcb89 +index 0000000000000..27b9c17fcb89a --- /dev/null +++ b/drivers/misc/ipts/resource.h @@ -0,0 +1,26 @@ @@ 
-6120,7 +6120,7 @@ index 000000000000..27b9c17fcb89 +#endif // _IPTS_RESOURCE_H_ diff --git a/drivers/misc/ipts/sensor-regs.h b/drivers/misc/ipts/sensor-regs.h new file mode 100644 -index 000000000000..c1afab48249b +index 0000000000000..c1afab48249b7 --- /dev/null +++ b/drivers/misc/ipts/sensor-regs.h @@ -0,0 +1,834 @@ @@ -6960,7 +6960,7 @@ index 000000000000..c1afab48249b +#endif // _IPTS_SENSOR_REGS_H_ diff --git a/drivers/misc/ipts/state.h b/drivers/misc/ipts/state.h new file mode 100644 -index 000000000000..ef73d28db47c +index 0000000000000..ef73d28db47cc --- /dev/null +++ b/drivers/misc/ipts/state.h @@ -0,0 +1,22 @@ @@ -6987,7 +6987,7 @@ index 000000000000..ef73d28db47c + +#endif // _IPTS_STATE_H_ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h -index 2ac1dc5104b7..5daa857a4938 100644 +index 2ac1dc5104b7a..5daa857a49389 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -119,6 +119,7 @@ @@ -6999,7 +6999,7 @@ index 2ac1dc5104b7..5daa857a4938 100644 #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c -index b4bf12f27caf..34f4338fa641 100644 +index b4bf12f27caf5..34f4338fa6417 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -86,6 +86,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { @@ -7012,7 +7012,7 @@ index b4bf12f27caf..34f4338fa641 100644 {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, diff --git a/include/linux/ipts-binary.h b/include/linux/ipts-binary.h new file mode 100644 -index 000000000000..98b54d74ff88 +index 0000000000000..98b54d74ff888 --- /dev/null +++ b/include/linux/ipts-binary.h @@ -0,0 +1,140 @@ @@ -7158,7 +7158,7 @@ index 000000000000..98b54d74ff88 +#endif // IPTS_BINARY_H diff --git a/include/linux/ipts-companion.h b/include/linux/ipts-companion.h new file mode 100644 -index 000000000000..de31f5e0b186 +index 0000000000000..de31f5e0b186b --- /dev/null +++ 
b/include/linux/ipts-companion.h @@ -0,0 +1,29 @@ @@ -7193,7 +7193,7 @@ index 000000000000..de31f5e0b186 +#endif // IPTS_COMPANION_H diff --git a/include/linux/ipts-gfx.h b/include/linux/ipts-gfx.h new file mode 100644 -index 000000000000..cb9d98fe96e4 +index 0000000000000..cb9d98fe96e4b --- /dev/null +++ b/include/linux/ipts-gfx.h @@ -0,0 +1,86 @@ @@ -7285,7 +7285,7 @@ index 000000000000..cb9d98fe96e4 +#endif // IPTS_GFX_H diff --git a/include/linux/ipts.h b/include/linux/ipts.h new file mode 100644 -index 000000000000..f229a3436851 +index 0000000000000..f229a34368516 --- /dev/null +++ b/include/linux/ipts.h @@ -0,0 +1,19 @@ @@ -7309,5 +7309,5 @@ index 000000000000..f229a3436851 + +#endif // IPTS_H -- -2.26.2 +2.27.0 diff --git a/patches/4.19/0008-wifi.patch b/patches/4.19/0008-wifi.patch new file mode 100644 index 000000000..33a96a63e --- /dev/null +++ b/patches/4.19/0008-wifi.patch @@ -0,0 +1,258 @@ +From 2a2a34591cb7a76dd7b21c6254005e93695c4836 Mon Sep 17 00:00:00 2001 +From: kitakar5525 <34676735+kitakar5525@users.noreply.github.com> +Date: Thu, 20 Feb 2020 16:51:11 +0900 +Subject: [PATCH 8/8] wifi + +--- + .../net/wireless/marvell/mwifiex/cfg80211.c | 26 ++++++ + drivers/net/wireless/marvell/mwifiex/pcie.c | 86 +++++++++++-------- + .../net/wireless/marvell/mwifiex/sta_cmd.c | 31 ++----- + 3 files changed, 83 insertions(+), 60 deletions(-) + +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 650191db25cbe..dd487fc9c1a1e 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -25,6 +25,11 @@ + static char *reg_alpha2; + module_param(reg_alpha2, charp, 0); + ++static bool allow_ps_mode; ++module_param(allow_ps_mode, bool, 0444); ++MODULE_PARM_DESC(allow_ps_mode, ++ "allow WiFi power management to be enabled. 
(default: disallowed)"); ++ + static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { + { + .max = 3, .types = BIT(NL80211_IFTYPE_STATION) | +@@ -439,6 +444,27 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy, + + ps_mode = enabled; + ++ /* Allow ps_mode to be enabled only when allow_ps_mode is set ++ * (but always allow ps_mode to be disabled in case it gets enabled ++ * for unknown reason and you want to disable it) */ ++ if (ps_mode && !allow_ps_mode) { ++ dev_info(priv->adapter->dev, ++ "Request to enable ps_mode received but it's disallowed " ++ "by module parameter. Rejecting the request.\n"); ++ ++ /* Return negative value to inform userspace tools that setting ++ * power_save to be enabled is not permitted. */ ++ return -1; ++ } ++ ++ if (ps_mode) ++ dev_warn(priv->adapter->dev, ++ "WARN: Request to enable ps_mode received. Enabling it. " ++ "Disable it if you encounter connection instability.\n"); ++ else ++ dev_info(priv->adapter->dev, ++ "Request to disable ps_mode received. Disabling it.\n"); ++ + return mwifiex_drv_set_power(priv, &ps_mode); + } + +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c +index 991b9cc180006..2464f536192cb 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c +@@ -146,40 +146,45 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) + * + * If already not suspended, this function allocates and sends a host + * sleep activate request to the firmware and turns off the traffic. ++ * ++ * XXX: ignoring all the above comment and just removes the card to ++ * fix S0ix and "AP scanning (sometimes) not working after suspend". ++ * Required code is extracted from mwifiex_pcie_remove(). 
+ */ + static int mwifiex_pcie_suspend(struct device *dev) + { +- struct mwifiex_adapter *adapter; +- struct pcie_service_card *card; + struct pci_dev *pdev = to_pci_dev(dev); +- +- card = pci_get_drvdata(pdev); ++ struct pcie_service_card *card = pci_get_drvdata(pdev); ++ struct mwifiex_adapter *adapter; ++ struct mwifiex_private *priv; ++ const struct mwifiex_pcie_card_reg *reg; ++ u32 fw_status; ++ int ret; + + /* Might still be loading firmware */ + wait_for_completion(&card->fw_done); + + adapter = card->adapter; +- if (!adapter) { +- dev_err(dev, "adapter is not valid\n"); ++ if (!adapter || !adapter->priv_num) + return 0; +- } + +- mwifiex_enable_wake(adapter); ++ reg = card->pcie.reg; ++ if (reg) ++ ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); ++ else ++ fw_status = -1; + +- /* Enable the Host Sleep */ +- if (!mwifiex_enable_hs(adapter)) { +- mwifiex_dbg(adapter, ERROR, +- "cmd: failed to suspend\n"); +- clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); +- mwifiex_disable_wake(adapter); +- return -EFAULT; +- } ++ if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) { ++ mwifiex_deauthenticate_all(adapter); + +- flush_workqueue(adapter->workqueue); ++ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); + +- /* Indicate device suspended */ +- set_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); +- clear_bit(MWIFIEX_IS_HS_ENABLING, &adapter->work_flags); ++ mwifiex_disable_auto_ds(priv); ++ ++ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); ++ } ++ ++ mwifiex_remove_card(adapter); + + return 0; + } +@@ -191,33 +196,35 @@ static int mwifiex_pcie_suspend(struct device *dev) + * + * If already not resumed, this function turns on the traffic and + * sends a host sleep cancel request to the firmware. ++ * ++ * XXX: ignoring all the above comment and probes the card that was ++ * removed on suspend. Required code is extracted from mwifiex_pcie_probe(). 
+ */ + static int mwifiex_pcie_resume(struct device *dev) + { +- struct mwifiex_adapter *adapter; +- struct pcie_service_card *card; + struct pci_dev *pdev = to_pci_dev(dev); ++ struct pcie_service_card *card = pci_get_drvdata(pdev); ++ int ret; + +- card = pci_get_drvdata(pdev); ++ pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", ++ pdev->vendor, pdev->device, pdev->revision); + +- if (!card->adapter) { +- dev_err(dev, "adapter structure is not valid\n"); +- return 0; +- } ++ init_completion(&card->fw_done); + +- adapter = card->adapter; ++ card->dev = pdev; + +- if (!test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) { +- mwifiex_dbg(adapter, WARN, +- "Device already resumed\n"); +- return 0; ++ /* device tree node parsing and platform specific configuration */ ++ if (pdev->dev.of_node) { ++ ret = mwifiex_pcie_probe_of(&pdev->dev); ++ if (ret) ++ return ret; + } + +- clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags); +- +- mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), +- MWIFIEX_ASYNC_CMD); +- mwifiex_disable_wake(adapter); ++ if (mwifiex_add_card(card, &card->fw_done, &pcie_ops, ++ MWIFIEX_PCIE, &pdev->dev)) { ++ pr_err("%s failed\n", __func__); ++ return -1; ++ } + + return 0; + } +@@ -233,8 +240,13 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) + { + struct pcie_service_card *card; ++ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + int ret; + ++ /* disable bridge_d3 to fix driver crashing after suspend on gen4+ ++ * Surface devices */ ++ parent_pdev->bridge_d3 = false; ++ + pr_debug("info: vendor=0x%4.04X device=0x%4.04X rev=%d\n", + pdev->vendor, pdev->device, pdev->revision); + +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +index 4ed10cf82f9a4..410bef3d6a6eb 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c ++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +@@ -2254,7 +2254,6 @@ int 
mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, + * - Function init (for first interface only) + * - Read MAC address (for first interface only) + * - Reconfigure Tx buffer size (for first interface only) +- * - Enable auto deep sleep (for first interface only) + * - Get Tx rate + * - Get Tx power + * - Set IBSS coalescing status +@@ -2267,7 +2266,6 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + struct mwifiex_adapter *adapter = priv->adapter; + int ret; + struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl; +- struct mwifiex_ds_auto_ds auto_ds; + enum state_11d_t state_11d; + struct mwifiex_ds_11n_tx_cfg tx_cfg; + u8 sdio_sp_rx_aggr_enable; +@@ -2339,16 +2337,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + if (ret) + return -1; + +- if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { +- /* Enable IEEE PS by default */ +- priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; +- ret = mwifiex_send_cmd(priv, +- HostCmd_CMD_802_11_PS_MODE_ENH, +- EN_AUTO_PS, BITMAP_STA_PS, NULL, +- true); +- if (ret) +- return -1; +- } ++ /* Not enabling ps_mode (IEEE power_save) by default. Enabling ++ * this causes connection instability, especially on 5GHz APs ++ * and eventually causes "firmware wakeup failed". Therefore, ++ * the relevant code was removed from here. */ + + if (drcs) { + adapter->drcs_enabled = true; +@@ -2395,17 +2387,10 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) + if (ret) + return -1; + +- if (!disable_auto_ds && first_sta && +- priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { +- /* Enable auto deep sleep */ +- auto_ds.auto_ds = DEEP_SLEEP_ON; +- auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; +- ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, +- EN_AUTO_PS, BITMAP_AUTO_DS, +- &auto_ds, true); +- if (ret) +- return -1; +- } ++ /* Not enabling auto deep sleep (auto_ds) by default. 
Enabling ++ * this reportedly causes "suspend/resume fails when not connected ++ * to an Access Point." Therefore, the relevant code was removed ++ * from here. */ + + if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) { + /* Send cmd to FW to enable/disable 11D function */ +-- +2.27.0 +