updating to mainline 4.14.15
parent a3e28e347b
commit 12be17a524
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 4.14.14-jakeday Kernel Configuration
+# Linux/x86_64 4.14.15-jakeday Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y

@@ -78,7 +78,7 @@ this protection comes at a cost:
 non-PTI SYSCALL entry code, so requires mapping fewer
 things into the userspace page tables. The downside is
 that stacks must be switched at entry time.
-d. Global pages are disabled for all kernel structures not
+c. Global pages are disabled for all kernel structures not
 mapped into both kernel and userspace page tables. This
 feature of the MMU allows different processes to share TLB
 entries mapping the kernel. Losing the feature means more

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 14
+SUBLEVEL = 15
 EXTRAVERSION =
 NAME = Petit Gorille
 

@@ -102,6 +102,15 @@ sio_pci_route(void)
 				       alpha_mv.sys.sio.route_tab);
 }
 
+static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
+{
+	if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+	    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+		return false;
+
+	return true;
+}
+
 static unsigned int __init
 sio_collect_irq_levels(void)
 {
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
 
 	/* Iterate through the devices, collecting IRQ levels. */
 	for_each_pci_dev(dev) {
-		if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
-		    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+		if (!sio_pci_dev_irq_needs_level(dev))
			continue;
 
		if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
 	return level_bits;
 }
 
-static void __init
-sio_fixup_irq_levels(unsigned int level_bits)
+static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
 {
 	unsigned int old_level_bits;
 
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
 	 */
 	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
 
-	level_bits |= (old_level_bits & 0x71ff);
+	if (reset)
+		old_level_bits &= 0x71ff;
+
+	level_bits |= old_level_bits;
 
 	outb((level_bits >> 0) & 0xff, 0x4d0);
 	outb((level_bits >> 8) & 0xff, 0x4d1);
 }
 
+static inline void
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+	__sio_fixup_irq_levels(level_bits, true);
+}
+
 static inline int
 noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
 	int irq = COMMON_TABLE_LOOKUP, tmp;
 	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
-	return irq >= 0 ? tmp : -1;
+
+	irq = irq >= 0 ? tmp : -1;
+
+	/* Fixup IRQ level if an actual IRQ mapping is detected */
+	if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
+		__sio_fixup_irq_levels(1 << irq, false);
+
+	return irq;
 }
 
 static inline int

@@ -53,7 +53,8 @@
 	};
 
 	pinctrl: pin-controller@10000 {
-		pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
+		pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
+			     &pmx_gpio_header_gpo>;
		pinctrl-names = "default";
 
		pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
		 * ground.
		 */
		pmx_gpio_header: pmx-gpio-header {
-			marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
+			marvell,pins = "mpp17", "mpp29", "mpp28",
				       "mpp35", "mpp34", "mpp40";
			marvell,function = "gpio";
		};
 
+		pmx_gpio_header_gpo: pxm-gpio-header-gpo {
+			marvell,pins = "mpp7";
+			marvell,function = "gpo";
+		};
+
		pmx_gpio_init: pmx-init {
			marvell,pins = "mpp38";
			marvell,function = "gpio";

@@ -10,6 +10,7 @@ CONFIG_SMP=y
 CONFIG_NR_CPUS=8
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
+CONFIG_CMA=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CPU_FREQ=y
@@ -33,6 +34,7 @@ CONFIG_CAN_SUN4I=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_AHCI_SUNXI=y

@@ -1656,6 +1656,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
 	.main_clk	= "mmchs3_fck",
 	.prcm		= {
		.omap2 = {
+			.module_offs = CORE_MOD,
			.prcm_reg_id = 1,
			.module_bit = OMAP3430_EN_MMC3_SHIFT,
			.idlest_reg_id = 1,

@@ -63,8 +63,10 @@
		cpm_ethernet: ethernet@0 {
			compatible = "marvell,armada-7k-pp22";
			reg = <0x0 0x100000>, <0x129000 0xb000>;
-			clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>, <&cpm_clk 1 5>;
-			clock-names = "pp_clk", "gop_clk", "mg_clk";
+			clocks = <&cpm_clk 1 3>, <&cpm_clk 1 9>,
+				 <&cpm_clk 1 5>, <&cpm_clk 1 18>;
+			clock-names = "pp_clk", "gop_clk",
+				      "mg_clk","axi_clk";
			marvell,system-controller = <&cpm_syscon0>;
			status = "disabled";
			dma-coherent;
@@ -114,7 +116,8 @@
			#size-cells = <0>;
			compatible = "marvell,orion-mdio";
			reg = <0x12a200 0x10>;
-			clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>;
+			clocks = <&cpm_clk 1 9>, <&cpm_clk 1 5>,
+				 <&cpm_clk 1 6>, <&cpm_clk 1 18>;
			status = "disabled";
		};
 
@@ -295,8 +298,8 @@
			compatible = "marvell,armada-cp110-sdhci";
			reg = <0x780000 0x300>;
			interrupts = <ICU_GRP_NSR 27 IRQ_TYPE_LEVEL_HIGH>;
-			clock-names = "core";
-			clocks = <&cpm_clk 1 4>;
+			clock-names = "core","axi";
+			clocks = <&cpm_clk 1 4>, <&cpm_clk 1 18>;
			dma-coherent;
			status = "disabled";
		};

@@ -63,8 +63,10 @@
		cps_ethernet: ethernet@0 {
			compatible = "marvell,armada-7k-pp22";
			reg = <0x0 0x100000>, <0x129000 0xb000>;
-			clocks = <&cps_clk 1 3>, <&cps_clk 1 9>, <&cps_clk 1 5>;
-			clock-names = "pp_clk", "gop_clk", "mg_clk";
+			clocks = <&cps_clk 1 3>, <&cps_clk 1 9>,
+				 <&cps_clk 1 5>, <&cps_clk 1 18>;
+			clock-names = "pp_clk", "gop_clk",
+				      "mg_clk", "axi_clk";
			marvell,system-controller = <&cps_syscon0>;
			status = "disabled";
			dma-coherent;
@@ -114,7 +116,8 @@
			#size-cells = <0>;
			compatible = "marvell,orion-mdio";
			reg = <0x12a200 0x10>;
-			clocks = <&cps_clk 1 9>, <&cps_clk 1 5>;
+			clocks = <&cps_clk 1 9>, <&cps_clk 1 5>,
+				 <&cps_clk 1 6>, <&cps_clk 1 18>;
			status = "disabled";
		};
 

@@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
	ret = kvm_psci_call(vcpu);
	if (ret < 0) {
-		kvm_inject_undefined(vcpu);
+		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}
 
@@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_inject_undefined(vcpu);
+	vcpu_set_reg(vcpu, 0, ~0UL);
	return 1;
 }
 

@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
	uart_port.type = PORT_AR7;
	uart_port.uartclk = clk_get_rate(bus_clk) / 2;
	uart_port.iotype = UPIO_MEM32;
-	uart_port.flags = UPF_FIXED_TYPE;
+	uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
	uart_port.regshift = 2;
 
	uart_port.line = 0;

@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			  *this_cpu_ptr(&cm_core_lock_flags));
	} else {
		WARN_ON(cluster != 0);
-		WARN_ON(vp != 0);
		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
		/*

@@ -209,5 +209,11 @@ exc_##label##_book3e:
	ori	r3,r3,vector_offset@l;		\
	mtspr	SPRN_IVOR##vector_number,r3;
 
+#define RFI_TO_KERNEL	\
+	rfi
+
+#define RFI_TO_USER	\
+	rfi
+
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 

@@ -69,6 +69,59 @@
  */
 #define EX_R3		EX_DAR
 
+/*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+ * The nop instructions allow us to insert one or more instructions to flush the
+ * L1-D cache when returning to userspace or a guest.
+ */
+#define RFI_FLUSH_SLOT		\
+	RFI_FLUSH_FIXUP_SECTION;	\
+	nop;	\
+	nop;	\
+	nop
+
+#define RFI_TO_KERNEL	\
+	rfid
+
+#define RFI_TO_USER	\
+	RFI_FLUSH_SLOT;	\
+	rfid;	\
+	b	rfi_flush_fallback
+
+#define RFI_TO_USER_OR_KERNEL	\
+	RFI_FLUSH_SLOT;	\
+	rfid;	\
+	b	rfi_flush_fallback
+
+#define RFI_TO_GUEST	\
+	RFI_FLUSH_SLOT;	\
+	rfid;	\
+	b	rfi_flush_fallback
+
+#define HRFI_TO_KERNEL	\
+	hrfid
+
+#define HRFI_TO_USER	\
+	RFI_FLUSH_SLOT;	\
+	hrfid;	\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_USER_OR_KERNEL	\
+	RFI_FLUSH_SLOT;	\
+	hrfid;	\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_GUEST	\
+	RFI_FLUSH_SLOT;	\
+	hrfid;	\
+	b	hrfi_flush_fallback
+
+#define HRFI_TO_UNKNOWN	\
+	RFI_FLUSH_SLOT;	\
+	hrfid;	\
+	b	hrfi_flush_fallback
+
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)	\
	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */	\
@@ -213,7 +266,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
	mtspr	SPRN_##h##SRR0,r12;	\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */	\
	mtspr	SPRN_##h##SRR1,r10;	\
-	h##rfid;	\
+	h##RFI_TO_KERNEL;	\
	b	.	/* prevent speculative execution */
 #define EXCEPTION_PROLOG_PSERIES_1(label, h)	\
	__EXCEPTION_PROLOG_PSERIES_1(label, h)
@@ -227,7 +280,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
	mtspr	SPRN_##h##SRR0,r12;	\
	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */	\
	mtspr	SPRN_##h##SRR1,r10;	\
-	h##rfid;	\
+	h##RFI_TO_KERNEL;	\
	b	.	/* prevent speculative execution */
 
 #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h)	\

@@ -187,7 +187,20 @@ label##3: \
	FTR_ENTRY_OFFSET label##1b-label##3b;	\
	.popsection;
 
+#define RFI_FLUSH_FIXUP_SECTION	\
+951:	\
+	.pushsection __rfi_flush_fixup,"a";	\
+	.align 2;	\
+952:	\
+	FTR_ENTRY_OFFSET 951b-952b;	\
+	.popsection;
+
+
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
 void apply_feature_fixups(void);
 void setup_feature_keys(void);
 #endif

@@ -241,6 +241,7 @@
 #define H_GET_HCA_INFO          0x1B8
 #define H_GET_PERF_COUNT        0x1BC
 #define H_MANAGE_TRACE          0x1C0
+#define H_GET_CPU_CHARACTERISTICS 0x1C8
 #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 #define H_QUERY_INT_STATE       0x1E4
 #define H_POLL_PENDING		0x1D8
@@ -330,6 +331,17 @@
 #define H_SIGNAL_SYS_RESET_ALL_OTHERS		-2
 /* >= 0 values are CPU number */
 
+/* H_GET_CPU_CHARACTERISTICS return values */
+#define H_CPU_CHAR_SPEC_BAR_ORI31	(1ull << 63) // IBM bit 0
+#define H_CPU_CHAR_BCCTRL_SERIALISED	(1ull << 62) // IBM bit 1
+#define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61) // IBM bit 2
+#define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60) // IBM bit 3
+#define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59) // IBM bit 4
+
+#define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
+#define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
+#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
+
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK	0x18
 #define PROC_TABLE_DEREG	0x10
@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
	}
 }
 
+struct h_cpu_char_result {
+	u64 character;
+	u64 behaviour;
+};
+
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */

@@ -231,6 +231,16 @@ struct paca_struct {
	struct sibling_subcore_state *sibling_subcore_state;
 #endif
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	/*
+	 * rfi fallback flush must be in its own cacheline to prevent
+	 * other paca data leaking into the L1d
+	 */
+	u64 exrfi[EX_SIZE] __aligned(0x80);
+	void *rfi_flush_fallback_area;
+	u64 l1d_flush_congruence;
+	u64 l1d_flush_sets;
+#endif
 };
 
 extern void copy_mm_to_paca(struct mm_struct *mm);

@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu)
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
 }
 
+static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	long rc;
+
+	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
+	if (rc == H_SUCCESS) {
+		p->character = retbuf[0];
+		p->behaviour = retbuf[1];
+	}
+
+	return rc;
+}
+
 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */

@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {}
 static inline void pseries_little_endian_exceptions(void) {}
 #endif /* CONFIG_PPC_PSERIES */
 
+void rfi_flush_enable(bool enable);
+
+/* These are bit flags */
+enum l1d_flush_type {
+	L1D_FLUSH_NONE		= 0x1,
+	L1D_FLUSH_FALLBACK	= 0x2,
+	L1D_FLUSH_ORI		= 0x4,
+	L1D_FLUSH_MTTRIG	= 0x8,
+};
+
+void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
+void do_rfi_flush_fixups(enum l1d_flush_type types);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_SETUP_H */

@@ -237,6 +237,11 @@ int main(void)
	OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
	OFFSET(PACA_IN_MCE, paca_struct, in_mce);
	OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
+	OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
+	OFFSET(PACA_EXRFI, paca_struct, exrfi);
+	OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
+	OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
+
 #endif
	OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
	OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);

@@ -37,6 +37,11 @@
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
 #include <asm/export.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
 
 /*
  * System calls.
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
+	ld	r2,GPR2(r1)
+	ld	r1,GPR1(r1)
+	mtlr	r4
+	mtcr	r5
+	mtspr	SPRN_SRR0,r7
+	mtspr	SPRN_SRR1,r8
+	RFI_TO_USER
+	b	.	/* prevent speculative execution */
+
+	/* exit to kernel */
 1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
-	RFI
+	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
 
 .Lsyscall_error:
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
-	rfid
+	RFI_TO_USER
	b	.	/* prevent speculative execution */
 #endif
 _ASM_NOKPROBE_SYMBOL(system_call_common);
 
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)
-1:
+
	mtspr	SPRN_SRR1,r3
 
	ld	r2,_CCR(r1)
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
+	RFI_TO_USER
+	b	.	/* prevent speculative execution */
 
-	rfid
+1:	mtspr	SPRN_SRR1,r3
+
+	ld	r2,_CCR(r1)
+	mtcrf	0xFF,r2
+	ld	r2,_NIP(r1)
+	mtspr	SPRN_SRR0,r2
+
+	ld	r0,GPR0(r1)
+	ld	r2,GPR2(r1)
+	ld	r3,GPR3(r1)
+	ld	r4,GPR4(r1)
+	ld	r1,GPR1(r1)
+	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
 
 #endif /* CONFIG_PPC_BOOK3E */
@@ -1073,7 +1101,7 @@ __enter_rtas:
 
	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
-	rfid
+	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
 
 rtas_return_loc:
@@ -1098,7 +1126,7 @@ rtas_return_loc:
 
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
-	rfid
+	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
 _ASM_NOKPROBE_SYMBOL(__enter_rtas)
 _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom)
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
-	rfid
+	RFI_TO_KERNEL
 #endif /* CONFIG_PPC_BOOK3E */
 
 1:	/* Return from OF */

@@ -254,7 +254,7 @@ BEGIN_FTR_SECTION
	LOAD_HANDLER(r12, machine_check_handle_early)
 1:	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r11
-	rfid
+	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
 2:
	/* Stack overflow. Stay on emergency stack and panic.
@@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
	li	r3,MSR_ME
	andc	r10,r10,r3	/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
	b	.
 2:
	/*
@@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
-	rfid
+	RFI_TO_USER_OR_KERNEL
 9:
	/* Deliver the machine check to host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
@@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 
+	andi.	r9,r11,MSR_PR	// Check for exception from userspace
+	cmpdi	cr4,r9,MSR_PR	// And save the result in CR4 for later
+
	/*
	 * Test MSR_RI before calling slb_allocate_realmode, because the
	 * MSR in r11 gets clobbered. However we still want to allocate
@@ -622,9 +625,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 
	/* All done -- return from exception. */
 
+	bne	cr4,1f	/* returning to kernel */
+
 .machine	push
 .machine	"power4"
	mtcrf	0x80,r9
+	mtcrf	0x08,r9	/* MSR[PR] indication is in cr4 */
	mtcrf	0x04,r9	/* MSR[RI] indication is in cr5 */
	mtcrf	0x02,r9	/* I/D indication is in cr6 */
	mtcrf	0x01,r9	/* slb_allocate uses cr0 and cr7 */
@@ -638,8 +644,29 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
+	RFI_TO_USER
	b	.	/* prevent speculative execution */
+1:
+.machine	push
+.machine	"power4"
+	mtcrf	0x80,r9
+	mtcrf	0x08,r9	/* MSR[PR] indication is in cr4 */
+	mtcrf	0x04,r9	/* MSR[RI] indication is in cr5 */
+	mtcrf	0x02,r9	/* I/D indication is in cr6 */
+	mtcrf	0x01,r9	/* slb_allocate uses cr0 and cr7 */
+.machine	pop
+
+	RESTORE_CTR(r9, PACA_EXSLB)
+	RESTORE_PPR_PACA(PACA_EXSLB, r9)
+	mr	r3,r12
+	ld	r9,PACA_EXSLB+EX_R9(r13)
+	ld	r10,PACA_EXSLB+EX_R10(r13)
+	ld	r11,PACA_EXSLB+EX_R11(r13)
+	ld	r12,PACA_EXSLB+EX_R12(r13)
+	ld	r13,PACA_EXSLB+EX_R13(r13)
+	RFI_TO_KERNEL
+	b	.	/* prevent speculative execution */
+
 
 2:	std	r3,PACA_EXSLB+EX_DAR(r13)
	mr	r3,r12
@@ -649,7 +676,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
	b	.
 
 8:	std	r3,PACA_EXSLB+EX_DAR(r13)
@@ -660,7 +687,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
-	rfid
+	RFI_TO_KERNEL
	b	.
 
 EXC_COMMON_BEGIN(unrecov_slb)
@@ -905,7 +932,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
	mtspr	SPRN_SRR0,r10 ;	\
	ld	r10,PACAKMSR(r13) ;	\
	mtspr	SPRN_SRR1,r10 ;	\
-	rfid ;	\
+	RFI_TO_KERNEL ;	\
	b	. ;	/* prevent speculative execution */
 
 #define SYSCALL_FASTENDIAN	\
@@ -914,7 +941,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
	xori	r12,r12,MSR_LE ;	\
	mtspr	SPRN_SRR1,r12 ;	\
	mr	r13,r9 ;	\
-	rfid ;	/* return to userspace */	\
+	RFI_TO_USER ;	/* return to userspace */	\
	b	. ;	/* prevent speculative execution */
 
 #if defined(CONFIG_RELOCATABLE)
@@ -1299,7 +1326,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
-	HRFID
+	HRFI_TO_UNKNOWN
	b	.
 #endif
 
@@ -1403,10 +1430,94 @@ masked_##_H##interrupt: \
	ld	r10,PACA_EXGEN+EX_R10(r13); \
	ld	r11,PACA_EXGEN+EX_R11(r13); \
	/* returns to kernel where r13 must be set up, so don't restore it */ \
-	##_H##rfid; \
+	##_H##RFI_TO_KERNEL; \
	b	.; \
	MASKED_DEC_HANDLER(_H)
+
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+	SET_SCRATCH0(r13);
+	GET_PACA(r13);
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	std	r12,PACA_EXRFI+EX_R12(r13)
+	std	r8,PACA_EXRFI+EX_R13(r13)
+	mfctr	r9
+	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+	ld	r11,PACA_L1D_FLUSH_SETS(r13)
+	ld	r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+	/*
+	 * The load adresses are at staggered offsets within cachelines,
+	 * which suits some pipelines better (on others it should not
+	 * hurt).
+	 */
+	addi	r12,r12,8
+	mtctr	r11
+	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+	/* order ld/st prior to dcbt stop all streams with flushing */
+	sync
+1:	li	r8,0
+	.rept	8 /* 8-way set associative */
+	ldx	r11,r10,r8
+	add	r8,r8,r12
+	xor	r11,r11,r11	// Ensure r11 is 0 even if fallback area is not
+	add	r8,r8,r11	// Add 0, this creates a dependency on the ldx
+	.endr
+	addi	r10,r10,128 /* 128 byte cache line */
+	bdnz	1b
+
+	mtctr	r9
+	ld	r9,PACA_EXRFI+EX_R9(r13)
+	ld	r10,PACA_EXRFI+EX_R10(r13)
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r12,PACA_EXRFI+EX_R12(r13)
+	ld	r8,PACA_EXRFI+EX_R13(r13)
+	GET_SCRATCH0(r13);
+	rfid
+
+TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+	SET_SCRATCH0(r13);
+	GET_PACA(r13);
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	std	r12,PACA_EXRFI+EX_R12(r13)
+	std	r8,PACA_EXRFI+EX_R13(r13)
+	mfctr	r9
+	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+	ld	r11,PACA_L1D_FLUSH_SETS(r13)
+	ld	r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
+	/*
+	 * The load adresses are at staggered offsets within cachelines,
+	 * which suits some pipelines better (on others it should not
+	 * hurt).
+	 */
+	addi	r12,r12,8
+	mtctr	r11
+	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+	/* order ld/st prior to dcbt stop all streams with flushing */
+	sync
+1:	li	r8,0
+	.rept	8 /* 8-way set associative */
+	ldx	r11,r10,r8
+	add	r8,r8,r12
+	xor	r11,r11,r11	// Ensure r11 is 0 even if fallback area is not
+	add	r8,r8,r11	// Add 0, this creates a dependency on the ldx
+	.endr
+	addi	r10,r10,128 /* 128 byte cache line */
+	bdnz	1b
+
+	mtctr	r9
+	ld	r9,PACA_EXRFI+EX_R9(r13)
+	ld	r10,PACA_EXRFI+EX_R10(r13)
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r12,PACA_EXRFI+EX_R12(r13)
+	ld	r8,PACA_EXRFI+EX_R13(r13)
+	GET_SCRATCH0(r13);
+	hrfid
 
 /*
  * Real mode exceptions actually use this too, but alternate
  * instruction code patches (which end up in the common .text area)
@@ -1426,7 +1537,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
-	rfid
+	RFI_TO_KERNEL
	b	.
 
 TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
@@ -1438,7 +1549,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
-	hrfid
+	HRFI_TO_KERNEL
	b	.
 #endif
 

@@ -784,3 +784,104 @@ static int __init disable_hardlockup_detector(void)
	return 0;
 }
 early_initcall(disable_hardlockup_detector);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+bool rfi_flush;
+
+static int __init handle_no_rfi_flush(char *p)
+{
+	pr_info("rfi-flush: disabled on command line.");
+	no_rfi_flush = true;
+	return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see doco that says to use
+ * nopti we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+	handle_no_rfi_flush(NULL);
+	return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+	/*
+	 * We don't need to do the flush explicitly, just enter+exit kernel is
+	 * sufficient, the RFI exit handlers will do the right thing.
+	 */
+}
+
+void rfi_flush_enable(bool enable)
+{
+	if (rfi_flush == enable)
+		return;
+
+	if (enable) {
+		do_rfi_flush_fixups(enabled_flush_types);
+		on_each_cpu(do_nothing, NULL, 1);
+	} else
+		do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+	rfi_flush = enable;
+}
+
+static void init_fallback_flush(void)
+{
+	u64 l1d_size, limit;
+	int cpu;
+
+	l1d_size = ppc64_caches.l1d.size;
+	limit = min(safe_stack_limit(), ppc64_rma_size);
+
+	/*
+	 * Align to L1d size, and size it at 2x L1d size, to catch possible
+	 * hardware prefetch runoff. We don't have a recipe for load patterns to
+	 * reliably avoid the prefetcher.
+	 */
+	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
+	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * The fallback flush is currently coded for 8-way
+		 * associativity. Different associativity is possible, but it
+		 * will be treated as 8-way and may not evict the lines as
+		 * effectively.
+		 *
+		 * 128 byte lines are mandatory.
+		 */
+		u64 c = l1d_size / 8;
+
+		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
+		paca[cpu].l1d_flush_congruence = c;
+		paca[cpu].l1d_flush_sets = c / 128;
+	}
+}
+
+void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+	if (types & L1D_FLUSH_FALLBACK) {
+		pr_info("rfi-flush: Using fallback displacement flush\n");
+		init_fallback_flush();
+	}
+
+	if (types & L1D_FLUSH_ORI)
+		pr_info("rfi-flush: Using ori type flush\n");
+
+	if (types & L1D_FLUSH_MTTRIG)
+		pr_info("rfi-flush: Using mttrig type flush\n");
+
+	enabled_flush_types = types;
+
+	if (!no_rfi_flush)
+		rfi_flush_enable(enable);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */

@@ -132,6 +132,15 @@ SECTIONS
	/* Read-only data */
	RO_DATA(PAGE_SIZE)
 
+#ifdef CONFIG_PPC64
+	. = ALIGN(8);
+	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+		__start___rfi_flush_fixup = .;
+		*(__rfi_flush_fixup)
+		__stop___rfi_flush_fixup = .;
+	}
+#endif
+
	EXCEPTION_TABLE(0)
 
	NOTES :kernel :notes

@@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mtmsrd	r0,1	/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
-	RFI
+	RFI_TO_KERNEL
 
 kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
@@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtmsrd	r6, 1	/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
-	RFI
+	RFI_TO_KERNEL
 
 /* Virtual-mode return */
 .Lvirt_return:
@@ -1131,8 +1131,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
-
-	hrfid
+	HRFI_TO_GUEST
	b	.
 
 secondary_too_late:

@@ -46,6 +46,9 @@
 
 #define FUNC(name)	name
 
+#define RFI_TO_KERNEL	RFI
+#define RFI_TO_GUEST	RFI
+
 .macro INTERRUPT_TRAMPOLINE intno
 
 .global kvmppc_trampoline_\intno
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
	GET_SCRATCH0(r13)
 
	/* And get back into the code */
-	RFI
+	RFI_TO_KERNEL
 #endif
 
 /*
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
	ori	r5, r5, MSR_EE
	mtsrr0	r7
	mtsrr1	r6
-	RFI
+	RFI_TO_KERNEL
 
 #include "book3s_segment.S"

@@ -156,7 +156,7 @@ no_dcbz32_on:
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r3, (SVCPU_R3)(r3)
 
-	RFI
+	RFI_TO_GUEST
 kvmppc_handler_trampoline_enter_end:
 
 
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
	beqa	BOOK3S_INTERRUPT_DOORBELL
 
-	RFI
+	RFI_TO_KERNEL
 kvmppc_handler_trampoline_exit_end:

@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
	}
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+	unsigned int instrs[3], *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___rfi_flush_fixup),
+	end = PTRRELOC(&__stop___rfi_flush_fixup);
+
+	instrs[0] = 0x60000000; /* nop */
+	instrs[1] = 0x60000000; /* nop */
+	instrs[2] = 0x60000000; /* nop */
+
+	if (types & L1D_FLUSH_FALLBACK)
+		/* b .+16 to fallback flush */
+		instrs[0] = 0x48000010;
+
+	i = 0;
+	if (types & L1D_FLUSH_ORI) {
+		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+	}
+
+	if (types & L1D_FLUSH_MTTRIG)
+		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction(dest, instrs[0]);
+		patch_instruction(dest + 1, instrs[1]);
+		patch_instruction(dest + 2, instrs[2]);
+	}
+
+	printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
	long *start, *end;

@@ -36,13 +36,62 @@
 #include <asm/opal.h>
 #include <asm/kexec.h>
 #include <asm/smp.h>
+#include <asm/setup.h>
 
 #include "powernv.h"
 
+static void pnv_setup_rfi_flush(void)
+{
+	struct device_node *np, *fw_features;
+	enum l1d_flush_type type;
+	int enable;
+
+	/* Default to fallback in case fw-features are not available */
+	type = L1D_FLUSH_FALLBACK;
+	enable = 1;
+
+	np = of_find_node_by_name(NULL, "ibm,opal");
+	fw_features = of_get_child_by_name(np, "fw-features");
+	of_node_put(np);
+
+	if (fw_features) {
+		np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+		if (np && of_property_read_bool(np, "enabled"))
+			type = L1D_FLUSH_MTTRIG;
+
+		of_node_put(np);
+
+		np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+		if (np && of_property_read_bool(np, "enabled"))
+			type = L1D_FLUSH_ORI;
+
+		of_node_put(np);
+
+		/* Enable unless firmware says NOT to */
+		enable = 2;
+		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable--;
+
+		of_node_put(np);
+
+		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable--;
+
+		of_node_put(np);
+		of_node_put(fw_features);
+	}
+
+	setup_rfi_flush(type, enable > 0);
+}
+
 static void __init pnv_setup_arch(void)
 {
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
 
+	pnv_setup_rfi_flush();
+
	/* Initialize SMP */
	pnv_smp_init();
 

@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void)
	of_pci_check_probe_only();
 }
 
+static void pseries_setup_rfi_flush(void)
+{
+	struct h_cpu_char_result result;
+	enum l1d_flush_type types;
+	bool enable;
+	long rc;
+
+	/* Enable by default */
+	enable = true;
+
+	rc = plpar_get_cpu_characteristics(&result);
+	if (rc == H_SUCCESS) {
+		types = L1D_FLUSH_NONE;
+
+		if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+			types |= L1D_FLUSH_MTTRIG;
+		if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+			types |= L1D_FLUSH_ORI;
+
+		/* Use fallback if nothing set in hcall */
+		if (types == L1D_FLUSH_NONE)
+			types = L1D_FLUSH_FALLBACK;
+
+		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+			enable = false;
+	} else {
+		/* Default to fallback if case hcall is not available */
+		types = L1D_FLUSH_FALLBACK;
+	}
+
+	setup_rfi_flush(types, enable);
+}
+
 static void __init pSeries_setup_arch(void)
 {
	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void)
 
	fwnmi_init();
 
+	pseries_setup_rfi_flush();
+
	/* By default, only probe PCI (can be overridden by rtas_pci) */
	pci_add_flags(PCI_PROBE_ONLY);
 

@@ -244,6 +244,17 @@ ENTRY(__switch_to_asm)
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi

@@ -487,6 +487,17 @@ ENTRY(__switch_to_asm)
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
 
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * When switching from a shallower to a deeper call stack
+	 * the RSB may either underflow or use entries populated
+	 * with userspace addresses. On CPUs where those concerns
+	 * exist, overwrite the RSB with entries which capture
+	 * speculative execution to prevent attack.
+	 */
+	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
@@ -1247,7 +1258,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
 #endif
 
 #ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check do_mce has_error_code=0 paranoid=1
 #endif
 
 /*

|
||||||
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
|
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
|
||||||
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
|
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
|
||||||
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
|
||||||
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
|
|
||||||
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
|
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
|
||||||
#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
|
#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
|
||||||
|
|
||||||
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
|
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
|
||||||
|
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
|
||||||
|
|
||||||
/* Virtualization flags: Linux defined, word 8 */
|
/* Virtualization flags: Linux defined, word 8 */
|
||||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||||
|
@ -245,6 +245,7 @@
|
||||||
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
|
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
|
||||||
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
|
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
|
||||||
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
|
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
|
||||||
|
#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
|
||||||
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
|
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
|
||||||
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
|
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
|
||||||
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
|
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
|
||||||
|
|
|
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
 
 void __init sme_early_init(void);
 
-void __init sme_encrypt_kernel(void);
+void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
 
 /* Architecture __weak replacement functions */
@@ -61,7 +61,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
 
-static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */

@ -11,7 +11,7 @@
|
||||||
* Fill the CPU return stack buffer.
|
* Fill the CPU return stack buffer.
|
||||||
*
|
*
|
||||||
* Each entry in the RSB, if used for a speculative 'ret', contains an
|
* Each entry in the RSB, if used for a speculative 'ret', contains an
|
||||||
* infinite 'pause; jmp' loop to capture speculative execution.
|
* infinite 'pause; lfence; jmp' loop to capture speculative execution.
|
||||||
*
|
*
|
||||||
* This is required in various cases for retpoline and IBRS-based
|
* This is required in various cases for retpoline and IBRS-based
|
||||||
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
|
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
|
||||||
|
@ -38,11 +38,13 @@
|
||||||
call 772f; \
|
call 772f; \
|
||||||
773: /* speculation trap */ \
|
773: /* speculation trap */ \
|
||||||
pause; \
|
pause; \
|
||||||
|
lfence; \
|
||||||
jmp 773b; \
|
jmp 773b; \
|
||||||
772: \
|
772: \
|
||||||
call 774f; \
|
call 774f; \
|
||||||
775: /* speculation trap */ \
|
775: /* speculation trap */ \
|
||||||
pause; \
|
pause; \
|
||||||
|
lfence; \
|
||||||
jmp 775b; \
|
jmp 775b; \
|
||||||
774: \
|
774: \
|
||||||
dec reg; \
|
dec reg; \
|
||||||
|
@@ -73,6 +75,7 @@
 	call	.Ldo_rop_\@
 .Lspec_trap_\@:
 	pause
+	lfence
 	jmp	.Lspec_trap_\@
 .Ldo_rop_\@:
 	mov	\reg, (%_ASM_SP)
@@ -165,6 +168,7 @@
 	"	.align 16\n"			\
 	"901:	call 903f;\n"			\
 	"902:	pause;\n"			\
+	"	lfence;\n"			\
 	"	jmp 902b;\n"			\
 	"	.align 16\n"			\
 	"903:	addl $4, %%esp;\n"		\
@@ -190,6 +194,9 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
@@ -199,16 +206,17 @@ enum spectre_v2_mitigation {
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+	unsigned long loops;
 
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
 		      ALTERNATIVE("jmp 910f",
 				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
 				  X86_FEATURE_RETPOLINE)
 		      "910:"
-		      : "=&r" (loops), ASM_CALL_CONSTRAINT
-		      : "r" (loops) : "memory" );
+		      : "=r" (loops), ASM_CALL_CONSTRAINT
+		      : : "memory" );
 #endif
 }
 
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */
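With the macro now filling all RSB_CLEAR_LOOPS entries itself, `loops` is purely a scratch register: it needs no initial value, so the earlyclobber "=&r" and the "r" (loops) input operand are dropped. A small stand-alone illustration of that output-only scratch pattern (hypothetical example, not kernel code):

#include <stdio.h>

static unsigned long count_down(void)
{
        unsigned long scratch;  /* deliberately uninitialized: only written by the asm */

        asm volatile("mov $32, %0\n"
                     "1:\n\t"
                     "dec %0\n\t"
                     "jnz 1b"
                     : "=r" (scratch)   /* output-only, no matching input needed */
                     : : "cc");
        return scratch;         /* always 0 here */
}

int main(void)
{
        printf("%lu\n", count_down());
        return 0;
}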
@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);
 
 static inline int get_si_code(unsigned long condition)
 {
@@ -369,8 +369,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->hwirq = virq + i;
 		err = assign_irq_vector_policy(virq + i, node, data, info,
 					       irq_data);
-		if (err)
+		if (err) {
+			irq_data->chip_data = NULL;
+			free_apic_chip_data(data);
 			goto error;
+		}
 		/*
 		 * If the apic destination mode is physical, then the
 		 * effective affinity is restricted to a single target
@@ -383,7 +386,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	return 0;
 
 error:
-	x86_vector_free_irqs(domain, virq, i + 1);
+	x86_vector_free_irqs(domain, virq, i);
 	return err;
 }
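The failing slot now releases its own chip_data before jumping to the error path, so the unwind must cover only the i entries that were fully set up; passing i + 1 would tear down the failed slot a second time. A stand-alone sketch of that unwind rule (illustrative only, not the kernel code):

#include <stdlib.h>

static int alloc_all(void **objs, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                objs[i] = malloc(64);
                if (!objs[i])
                        goto error;     /* the failed slot owns nothing */
        }
        return 0;

error:
        /* unwind only the i fully constructed slots, not i + 1 */
        while (i--)
                free(objs[i]);
        return -1;
}

int main(void)
{
        void *objs[8];
        return alloc_all(objs, 8);
}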
@@ -23,6 +23,7 @@
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
+#include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
 
@@ -155,6 +156,23 @@ disable:
 	return SPECTRE_V2_CMD_NONE;
 }
 
+/* Check for Skylake-like CPUs (for RSB handling) */
+static bool __init is_skylake_era(void)
+{
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+	    boot_cpu_data.x86 == 6) {
+		switch (boot_cpu_data.x86_model) {
+		case INTEL_FAM6_SKYLAKE_MOBILE:
+		case INTEL_FAM6_SKYLAKE_DESKTOP:
+		case INTEL_FAM6_SKYLAKE_X:
+		case INTEL_FAM6_KABYLAKE_MOBILE:
+		case INTEL_FAM6_KABYLAKE_DESKTOP:
+			return true;
+		}
+	}
+	return false;
+}
+
 static void __init spectre_v2_select_mitigation(void)
 {
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
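boot_cpu_data.x86 and .x86_model are the decoded family/model from CPUID leaf 1; the INTEL_FAM6_* constants above are just those model numbers. A user-space sketch of the same decoding (assumes GCC's <cpuid.h>; illustrative only):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx, family, model;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;
        family = (eax >> 8) & 0xf;
        model = (eax >> 4) & 0xf;
        if (family == 0x6 || family == 0xf)
                model |= ((eax >> 16) & 0xf) << 4;      /* extended model bits */
        if (family == 0xf)
                family += (eax >> 20) & 0xff;           /* extended family bits */
        printf("family 0x%x model 0x%x\n", family, model);
        return 0;
}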
@@ -213,6 +231,24 @@ retpoline_auto:
 
 	spectre_v2_enabled = mode;
 	pr_info("%s\n", spectre_v2_strings[mode]);
+
+	/*
+	 * If neither SMEP or KPTI are available, there is a risk of
+	 * hitting userspace addresses in the RSB after a context switch
+	 * from a shallow call stack to a deeper one. To prevent this fill
+	 * the entire RSB, even when using IBRS.
+	 *
+	 * Skylake era CPUs have a separate issue with *underflow* of the
+	 * RSB, when they will predict 'ret' targets from the generic BTB.
+	 * The proper mitigation for this is IBRS. If IBRS is not supported
+	 * or deactivated in favour of retpolines the RSB fill on context
+	 * switch is required.
+	 */
+	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
+	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
+		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+		pr_info("Filling RSB on context switch\n");
+	}
 }
 
 #undef pr_fmt
@@ -525,10 +525,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 		 */
 		if (static_branch_unlikely(&rdt_mon_enable_key))
 			rmdir_mondata_subdir_allrdtgrp(r, d->id);
-		kfree(d->ctrl_val);
-		kfree(d->rmid_busy_llc);
-		kfree(d->mbm_total);
-		kfree(d->mbm_local);
 		list_del(&d->list);
 		if (is_mbm_enabled())
 			cancel_delayed_work(&d->mbm_over);
@@ -545,6 +541,10 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 			cancel_delayed_work(&d->cqm_limbo);
 		}
 
+		kfree(d->ctrl_val);
+		kfree(d->rmid_busy_llc);
+		kfree(d->mbm_total);
+		kfree(d->mbm_local);
 		kfree(d);
 		return;
 	}
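Moving the kfree() calls below the cancel_delayed_work() calls closes a window in which the still-pending worker could dereference freed buffers. A stand-alone analogue of that ordering rule (a pthread in place of delayed work; hypothetical types, compile with -lpthread):

#include <pthread.h>
#include <stdlib.h>

struct domain {
        int *mbm_total;         /* read by the deferred worker */
};

static void *overflow_worker(void *arg)
{
        struct domain *d = arg;
        return (void *)(long)d->mbm_total[0];
}

int main(void)
{
        struct domain d = { .mbm_total = calloc(16, sizeof(int)) };
        pthread_t t;

        pthread_create(&t, NULL, overflow_worker, &d);
        /* freeing d.mbm_total here would race with the worker */
        pthread_join(t, NULL);  /* analogous to cancel_delayed_work() */
        free(d.mbm_total);      /* safe only once the worker cannot run */
        return 0;
}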
@@ -1788,6 +1788,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
 						unexpected_machine_check;
 
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+	machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
@@ -21,7 +21,6 @@ struct cpuid_bit {
 static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_APERFMPERF,	CPUID_ECX,  0, 0x00000006, 0 },
 	{ X86_FEATURE_EPB,		CPUID_ECX,  3, 0x00000006, 0 },
-	{ X86_FEATURE_INTEL_PT,		CPUID_EBX, 25, 0x00000007, 0 },
 	{ X86_FEATURE_AVX512_4VNNIW,	CPUID_EDX,  2, 0x00000007, 0 },
 	{ X86_FEATURE_AVX512_4FMAPS,	CPUID_EDX,  3, 0x00000007, 0 },
 	{ X86_FEATURE_CAT_L3,		CPUID_EBX,  1, 0x00000010, 0 },
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	p = fixup_pointer(&phys_base, physaddr);
 	*p += load_delta - sme_get_me_mask();
 
-	/* Encrypt the kernel (if SME is active) */
-	sme_encrypt_kernel();
+	/* Encrypt the kernel and related (if SME is active) */
+	sme_encrypt_kernel(bp);
 
 	/*
 	 * Return the SME encryption mask (if SME is active) to be used as a
@@ -56,7 +56,7 @@ struct idt_data {
  * Early traps running on the DEFAULT_STACK because the other interrupt
  * stacks work only after cpu_init().
  */
-static const __initdata struct idt_data early_idts[] = {
+static const __initconst struct idt_data early_idts[] = {
 	INTG(X86_TRAP_DB,	debug),
 	SYSG(X86_TRAP_BP,	int3),
 #ifdef CONFIG_X86_32
@@ -70,7 +70,7 @@ static const __initdata struct idt_data early_idts[] = {
 * the traps which use them are reinitialized with IST after cpu_init() has
 * set up TSS.
 */
-static const __initdata struct idt_data def_idts[] = {
+static const __initconst struct idt_data def_idts[] = {
 	INTG(X86_TRAP_DE,	divide_error),
 	INTG(X86_TRAP_NMI,	nmi),
 	INTG(X86_TRAP_BR,	bounds),
@@ -108,7 +108,7 @@ static const __initdata struct idt_data def_idts[] = {
 /*
  * The APIC and SMP idt entries
  */
-static const __initdata struct idt_data apic_idts[] = {
+static const __initconst struct idt_data apic_idts[] = {
 #ifdef CONFIG_SMP
 	INTG(RESCHEDULE_VECTOR,		reschedule_interrupt),
 	INTG(CALL_FUNCTION_VECTOR,	call_function_interrupt),
@@ -150,7 +150,7 @@ static const __initdata struct idt_data apic_idts[] = {
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
-static const __initdata struct idt_data early_pf_idts[] = {
+static const __initconst struct idt_data early_pf_idts[] = {
 	INTG(X86_TRAP_PF,	page_fault),
 };
 
@@ -158,7 +158,7 @@ static const __initdata struct idt_data early_pf_idts[] = {
 * Override for the debug_idt. Same as the default, but with interrupt
 * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
 */
-static const __initdata struct idt_data dbg_idts[] = {
+static const __initconst struct idt_data dbg_idts[] = {
 	INTG(X86_TRAP_DB,	debug),
 	INTG(X86_TRAP_BP,	int3),
 };
@@ -180,7 +180,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
 * The exceptions which use Interrupt stacks. They are setup after
 * cpu_init() when the TSS has been initialized.
 */
-static const __initdata struct idt_data ist_idts[] = {
+static const __initconst struct idt_data ist_idts[] = {
 	ISTG(X86_TRAP_DB,	debug,	DEBUG_STACK),
 	ISTG(X86_TRAP_NMI,	nmi,	NMI_STACK),
 	SISTG(X86_TRAP_BP,	int3,	DEBUG_STACK),
@@ -40,6 +40,7 @@
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
+#include <asm/nospec-branch.h>
 
 #include "common.h"
 
@@ -205,7 +206,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -239,6 +240,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 	return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+	int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+	 * older gcc may use indirect jump. So we add this check instead of
+	 * replace indirect-jump check.
+	 */
+	if (!ret)
+		ret = insn_jump_into_range(insn,
+				(unsigned long)__indirect_thunk_start,
+				(unsigned long)__indirect_thunk_end -
+				(unsigned long)__indirect_thunk_start);
+#endif
+	return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
@@ -380,19 +380,24 @@ void stop_this_cpu(void *dummy)
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 
+	/*
+	 * Use wbinvd on processors that support SME. This provides support
+	 * for performing a successful kexec when going from SME inactive
+	 * to SME active (or vice-versa). The cache must be cleared so that
+	 * if there are entries with the same physical address, both with and
+	 * without the encryption bit, they don't race each other when flushed
+	 * and potentially end up with the wrong entry being committed to
+	 * memory.
+	 */
+	if (boot_cpu_has(X86_FEATURE_SME))
+		native_wbinvd();
 	for (;;) {
 		/*
-		 * Use wbinvd followed by hlt to stop the processor. This
-		 * provides support for kexec on a processor that supports
-		 * SME. With kexec, going from SME inactive to SME active
-		 * requires clearing cache entries so that addresses without
-		 * the encryption bit set don't corrupt the same physical
-		 * address that has the encryption bit set when caches are
-		 * flushed. To achieve this a wbinvd is performed followed by
-		 * a hlt. Even if the processor is not in the kexec/SME
-		 * scenario this only adds a wbinvd to a halting processor.
+		 * Use native_halt() so that memory contents don't change
+		 * (stack usage and variables) after possibly issuing the
+		 * native_wbinvd() above.
 		 */
-		asm volatile("wbinvd; hlt" : : : "memory");
+		native_halt();
 	}
 }
@@ -376,14 +376,6 @@ static void __init reserve_initrd(void)
 	    !ramdisk_image || !ramdisk_size)
 		return;		/* No initrd provided by bootloader */
 
-	/*
-	 * If SME is active, this memory will be marked encrypted by the
-	 * kernel when it is accessed (including relocation). However, the
-	 * ramdisk image was loaded decrypted by the bootloader, so make
-	 * sure that it is encrypted before accessing it.
-	 */
-	sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
-
 	initrd_start = 0;
 
 	mapped_size = memblock_mem_size(max_pfn_mapped);
@@ -602,7 +602,6 @@ unsigned long native_calibrate_tsc(void)
 	case INTEL_FAM6_KABYLAKE_DESKTOP:
 		crystal_khz = 24000;	/* 24.0 MHz */
 		break;
-	case INTEL_FAM6_SKYLAKE_X:
 	case INTEL_FAM6_ATOM_DENVERTON:
 		crystal_khz = 25000;	/* 25.0 MHz */
 		break;
@@ -612,6 +611,8 @@ unsigned long native_calibrate_tsc(void)
 		}
 	}
 
+	if (crystal_khz == 0)
+		return 0;
 	/*
 	 * TSC frequency determined by CPUID is a "hardware reported"
 	 * frequency and is the most accurate one so far we have. This
@@ -124,6 +124,12 @@ SECTIONS
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+		__indirect_thunk_start = .;
+		*(.text.__x86.indirect_thunk)
+		__indirect_thunk_end = .;
+#endif
+
 		/* End of text section */
 		_etext = .;
 	} :text = 0x9090
@@ -9,7 +9,7 @@
 #include <asm/nospec-branch.h>
 
 .macro THUNK reg
-	.section .text.__x86.indirect_thunk.\reg
+	.section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
 	CFI_STARTPROC
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
 * than one per register with the correct names. So we do it
 * the simple and nasty way...
 */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)
@@ -173,14 +173,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
-static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
+static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
+		u32 *pkey)
 {
 	/* This is effectively an #ifdef */
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return;
 
 	/* Fault not from Protection Keys: nothing to do */
-	if (si_code != SEGV_PKUERR)
+	if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
 		return;
 	/*
 	 * force_sig_info_fault() is called from a number of
@@ -219,7 +220,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 		lsb = PAGE_SHIFT;
 	info.si_addr_lsb = lsb;
 
-	fill_sig_info_pkey(si_code, &info, pkey);
+	fill_sig_info_pkey(si_signo, si_code, &info, pkey);
 
 	force_sig_info(si_signo, &info, tsk);
 }
@@ -21,10 +21,14 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid)
+static __init void *early_alloc(size_t size, int nid, bool panic)
 {
-	return memblock_virt_alloc_try_nid_nopanic(size, size,
-		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	if (panic)
+		return memblock_virt_alloc_try_nid(size, size,
+			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+	else
+		return memblock_virt_alloc_try_nid_nopanic(size, size,
+			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
@@ -38,14 +42,14 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 	if (boot_cpu_has(X86_FEATURE_PSE) &&
 	    ((end - addr) == PMD_SIZE) &&
 	    IS_ALIGNED(addr, PMD_SIZE)) {
-		p = early_alloc(PMD_SIZE, nid);
+		p = early_alloc(PMD_SIZE, nid, false);
 		if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
 			return;
 		else if (p)
 			memblock_free(__pa(p), PMD_SIZE);
 	}
 
-	p = early_alloc(PAGE_SIZE, nid);
+	p = early_alloc(PAGE_SIZE, nid, true);
 	pmd_populate_kernel(&init_mm, pmd, p);
 }
 
@@ -57,7 +61,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 		if (!pte_none(*pte))
 			continue;
 
-		p = early_alloc(PAGE_SIZE, nid);
+		p = early_alloc(PAGE_SIZE, nid, true);
 		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
 		set_pte_at(&init_mm, addr, pte, entry);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -75,14 +79,14 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
 	if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
 	    ((end - addr) == PUD_SIZE) &&
 	    IS_ALIGNED(addr, PUD_SIZE)) {
-		p = early_alloc(PUD_SIZE, nid);
+		p = early_alloc(PUD_SIZE, nid, false);
 		if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
 			return;
 		else if (p)
 			memblock_free(__pa(p), PUD_SIZE);
 	}
 
-	p = early_alloc(PAGE_SIZE, nid);
+	p = early_alloc(PAGE_SIZE, nid, true);
 	pud_populate(&init_mm, pud, p);
 }
 
@@ -101,7 +105,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
 	unsigned long next;
 
 	if (p4d_none(*p4d)) {
-		void *p = early_alloc(PAGE_SIZE, nid);
+		void *p = early_alloc(PAGE_SIZE, nid, true);
 
 		p4d_populate(&init_mm, p4d, p);
 	}
@@ -122,7 +126,7 @@ static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		p = early_alloc(PAGE_SIZE, nid);
+		p = early_alloc(PAGE_SIZE, nid, true);
 		pgd_populate(&init_mm, pgd, p);
 	}
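The new panic flag encodes the call-site policy: huge-page attempts may fail because the caller falls back to 4K pages, while base-page allocations must succeed or boot cannot continue. A user-space sketch of that split (illustrative, not the memblock API):

#include <stdio.h>
#include <stdlib.h>

static void *early_alloc(size_t size, int must_succeed)
{
        void *p = malloc(size);

        if (!p && must_succeed) {
                /* the kernel version panics with the allocation size */
                fprintf(stderr, "early_alloc: %zu bytes failed\n", size);
                abort();
        }
        return p;       /* callers with a fallback pass must_succeed = 0 */
}

int main(void)
{
        void *p = early_alloc(2 << 20, 0);      /* huge attempt: may fail */

        if (!p)
                p = early_alloc(4096, 1);       /* base page: must succeed */
        free(p);
        return 0;
}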
@@ -213,37 +213,62 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
 
-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
-				 unsigned long end)
+struct sme_populate_pgd_data {
+	void	*pgtable_area;
+	pgd_t	*pgd;
+
+	pmdval_t pmd_flags;
+	pteval_t pte_flags;
+	unsigned long paddr;
+
+	unsigned long vaddr;
+	unsigned long vaddr_end;
+};
+
+static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
 	unsigned long pgd_start, pgd_end, pgd_size;
 	pgd_t *pgd_p;
 
-	pgd_start = start & PGDIR_MASK;
-	pgd_end = end & PGDIR_MASK;
+	pgd_start = ppd->vaddr & PGDIR_MASK;
+	pgd_end = ppd->vaddr_end & PGDIR_MASK;
 
-	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
-	pgd_size *= sizeof(pgd_t);
+	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
 
-	pgd_p = pgd_base + pgd_index(start);
+	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
 
 	memset(pgd_p, 0, pgd_size);
 }
 
 #define PGD_FLAGS	_KERNPG_TABLE_NOENC
 #define P4D_FLAGS	_KERNPG_TABLE_NOENC
 #define PUD_FLAGS	_KERNPG_TABLE_NOENC
-#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+#define PMD_FLAGS	_KERNPG_TABLE_NOENC
+
+#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
+#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
+
+#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC		PTE_FLAGS
+#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
 
-static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
-				     unsigned long vaddr, pmdval_t pmd_val)
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
 	pud_t *pud_p;
 	pmd_t *pmd_p;
 
-	pgd_p = pgd_base + pgd_index(vaddr);
+	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
 	if (native_pgd_val(*pgd_p)) {
 		if (IS_ENABLED(CONFIG_X86_5LEVEL))
 			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -253,15 +278,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 		pgd_t pgd;
 
 		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-			p4d_p = pgtable_area;
+			p4d_p = ppd->pgtable_area;
 			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-			pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+			ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
 
 			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
 		} else {
-			pud_p = pgtable_area;
+			pud_p = ppd->pgtable_area;
 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
 			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
 		}
@@ -269,58 +294,160 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
 	}
 
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_p += p4d_index(vaddr);
+		p4d_p += p4d_index(ppd->vaddr);
 		if (native_p4d_val(*p4d_p)) {
 			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
 		} else {
 			p4d_t p4d;
 
-			pud_p = pgtable_area;
+			pud_p = ppd->pgtable_area;
 			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
 
 			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
 			native_set_p4d(p4d_p, p4d);
 		}
 	}
 
-	pud_p += pud_index(vaddr);
+	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			goto out;
+			return NULL;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
 		pud_t pud;
 
-		pmd_p = pgtable_area;
+		pmd_p = ppd->pgtable_area;
 		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-		pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+		ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
 
 		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
 		native_set_pud(pud_p, pud);
 	}
 
-	pmd_p += pmd_index(vaddr);
-	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(pmd_val));
+	return pmd_p;
+}
 
-out:
-	return pgtable_area;
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
+		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
+}
+
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+	pte_t *pte_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (native_pmd_val(*pmd_p)) {
+		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+			return;
+
+		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+	} else {
+		pmd_t pmd;
+
+		pte_p = ppd->pgtable_area;
+		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+		native_set_pmd(pmd_p, pmd);
+	}
+
+	pte_p += pte_index(ppd->vaddr);
+	if (!native_pte_val(*pte_p))
+		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd_large(ppd);
+
+		ppd->vaddr += PMD_PAGE_SIZE;
+		ppd->paddr += PMD_PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd(ppd);
+
+		ppd->vaddr += PAGE_SIZE;
+		ppd->paddr += PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags, pteval_t pte_flags)
+{
+	unsigned long vaddr_end;
+
+	ppd->pmd_flags = pmd_flags;
+	ppd->pte_flags = pte_flags;
+
+	/* Save original end value since we modify the struct value */
+	vaddr_end = ppd->vaddr_end;
+
+	/* If start is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+	__sme_map_range_pte(ppd);
+
+	/* Create PMD entries */
+	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+	__sme_map_range_pmd(ppd);
+
+	/* If end is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = vaddr_end;
+	__sme_map_range_pte(ppd);
+}
+
+static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
+}
+
+static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
+}
+
+static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size;
+	unsigned long p4d_size, pud_size, pmd_size, pte_size;
 	unsigned long total;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. That mappings will be covered by 2MB
-	 * PMD entries so we can conservatively calculate the required
+	 * entries that are needed. Those mappings will be covered mostly
+	 * by 2MB PMD entries so we can conservatively calculate the required
 	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings. Incrementing the count for each covers the case where
-	 * the addresses cross entries.
+	 * mappings. For mappings that are not 2MB aligned, PTE mappings
+	 * would be needed for the start and end portion of the address range
+	 * that fall outside of the 2MB alignment. This results in, at most,
+	 * two extra pages to hold PTE entries for each range that is mapped.
+	 * Incrementing the count for each covers the case where the addresses
+	 * cross entries.
 	 */
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
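__sme_map_range splits every range into an unaligned head mapped with 4K PTEs, a 2MB-aligned body mapped with PMDs, and an unaligned tail mapped with PTEs again. A user-space sketch of just that boundary arithmetic (assumes the range spans at least one full 2MB block):

#include <stdio.h>

#define PMD_PAGE_SIZE   (2UL << 20)     /* 2MB, as in the kernel */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long vaddr = 0x1ff000, vaddr_end = 0x5c1000;
        unsigned long head_end = ALIGN_UP(vaddr, PMD_PAGE_SIZE);
        unsigned long body_end = ALIGN_DOWN(vaddr_end, PMD_PAGE_SIZE);

        printf("PTE head: [%#lx, %#lx)\n", vaddr, head_end);
        printf("PMD body: [%#lx, %#lx)\n", head_end, body_end);
        printf("PTE tail: [%#lx, %#lx)\n", body_end, vaddr_end);
        return 0;
}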
@@ -334,8 +461,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	}
 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size;
+	total = p4d_size + pud_size + pmd_size + pte_size;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
@@ -359,29 +487,29 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	return total;
 }
 
-void __init sme_encrypt_kernel(void)
+void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
 {
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
 	unsigned long kernel_start, kernel_end, kernel_len;
+	unsigned long initrd_start, initrd_end, initrd_len;
+	struct sme_populate_pgd_data ppd;
 	unsigned long pgtable_area_len;
-	unsigned long paddr, pmd_flags;
 	unsigned long decrypted_base;
-	void *pgtable_area;
-	pgd_t *pgd;
 
 	if (!sme_active())
 		return;
 
 	/*
-	 * Prepare for encrypting the kernel by building new pagetables with
-	 * the necessary attributes needed to encrypt the kernel in place.
+	 * Prepare for encrypting the kernel and initrd by building new
+	 * pagetables with the necessary attributes needed to encrypt the
+	 * kernel in place.
 	 *
 	 * One range of virtual addresses will map the memory occupied
-	 * by the kernel as encrypted.
+	 * by the kernel and initrd as encrypted.
 	 *
 	 * Another range of virtual addresses will map the memory occupied
-	 * by the kernel as decrypted and write-protected.
+	 * by the kernel and initrd as decrypted and write-protected.
 	 *
 	 * The use of write-protect attribute will prevent any of the
 	 * memory from being cached.
@@ -392,6 +520,20 @@ void __init sme_encrypt_kernel(void)
 	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
 	kernel_len = kernel_end - kernel_start;
 
+	initrd_start = 0;
+	initrd_end = 0;
+	initrd_len = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
+		     ((unsigned long)bp->ext_ramdisk_size << 32);
+	if (initrd_len) {
+		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
+			       ((unsigned long)bp->ext_ramdisk_image << 32);
+		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
+		initrd_len = initrd_end - initrd_start;
+	}
+#endif
+
 	/* Set the encryption workarea to be immediately after the kernel */
 	workarea_start = kernel_end;
 
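The boot protocol stores the ramdisk address and size as a 32-bit legacy field plus a 32-bit extension, which this hunk recombines. A tiny stand-alone sketch of that recombination (a generic helper, not the boot_params layout itself):

#include <stdint.h>
#include <stdio.h>

/* join a legacy low word with its extension high word, as done above for
 * bp->hdr.ramdisk_image / bp->ext_ramdisk_image */
static uint64_t join_hi_lo(uint32_t lo, uint32_t hi)
{
        return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)join_hi_lo(0x37000000, 0x1));
        return 0;
}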
@@ -414,16 +556,21 @@ void __init sme_encrypt_kernel(void)
 	 */
 	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
 	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+	if (initrd_len)
+		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
 
 	/* PUDs and PMDs needed in the current pagetables for the workarea */
 	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
 
 	/*
 	 * The total workarea includes the executable encryption area and
-	 * the pagetable area.
+	 * the pagetable area. The start of the workarea is already 2MB
+	 * aligned, align the end of the workarea on a 2MB boundary so that
+	 * we don't try to create/allocate PTE entries from the workarea
+	 * before it is mapped.
 	 */
 	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = workarea_start + workarea_len;
+	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
 	/*
 	 * Set the address to the start of where newly created pagetable
@@ -432,45 +579,30 @@ void __init sme_encrypt_kernel(void)
 	 * pagetables and when the new encrypted and decrypted kernel
 	 * mappings are populated.
 	 */
-	pgtable_area = (void *)execute_end;
+	ppd.pgtable_area = (void *)execute_end;
 
 	/*
 	 * Make sure the current pagetable structure has entries for
 	 * addressing the workarea.
 	 */
-	pgd = (pgd_t *)native_read_cr3_pa();
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.pgd = (pgd_t *)native_read_cr3_pa();
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start;
+	ppd.vaddr_end = workarea_end;
+	sme_map_range_decrypted(&ppd);
 
 	/* Flush the TLB - no globals so cr3 is enough */
 	native_write_cr3(__native_read_cr3());
 
 	/*
 	 * A new pagetable structure is being built to allow for the kernel
-	 * to be encrypted. It starts with an empty PGD that will then be
-	 * populated with new PUDs and PMDs as the encrypted and decrypted
-	 * kernel mappings are created.
+	 * and initrd to be encrypted. It starts with an empty PGD that will
+	 * then be populated with new PUDs and PMDs as the encrypted and
+	 * decrypted kernel mappings are created.
 	 */
-	pgd = pgtable_area;
-	memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
-	pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
-
-	/* Add encrypted kernel (identity) mappings */
-	pmd_flags = PMD_FLAGS | _PAGE_ENC;
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + pmd_flags);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.pgd = ppd.pgtable_area;
+	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
+	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
 
 	/*
 	 * A different PGD index/entry must be used to get different
@@ -479,47 +611,79 @@ void __init sme_encrypt_kernel(void)
 	 * the base of the mapping.
 	 */
 	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
+	if (initrd_len) {
+		unsigned long check_base;
+
+		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
+		decrypted_base = max(decrypted_base, check_base);
+	}
 	decrypted_base <<= PGDIR_SHIFT;
 
-	/* Add decrypted, write-protected kernel (non-identity) mappings */
-	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + pmd_flags);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	/* Add encrypted kernel (identity) mappings */
+	ppd.paddr = kernel_start;
+	ppd.vaddr = kernel_start;
+	ppd.vaddr_end = kernel_end;
+	sme_map_range_encrypted(&ppd);
+
+	/* Add decrypted, write-protected kernel (non-identity) mappings */
+	ppd.paddr = kernel_start;
+	ppd.vaddr = kernel_start + decrypted_base;
+	ppd.vaddr_end = kernel_end + decrypted_base;
+	sme_map_range_decrypted_wp(&ppd);
+
+	if (initrd_len) {
+		/* Add encrypted initrd (identity) mappings */
+		ppd.paddr = initrd_start;
+		ppd.vaddr = initrd_start;
+		ppd.vaddr_end = initrd_end;
+		sme_map_range_encrypted(&ppd);
+		/*
+		 * Add decrypted, write-protected initrd (non-identity) mappings
+		 */
+		ppd.paddr = initrd_start;
+		ppd.vaddr = initrd_start + decrypted_base;
+		ppd.vaddr_end = initrd_end + decrypted_base;
+		sme_map_range_decrypted_wp(&ppd);
+	}
 
 	/* Add decrypted workarea mappings to both kernel mappings */
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr,
-						paddr + PMD_FLAGS);
-
-		pgtable_area = sme_populate_pgd(pgd, pgtable_area,
-						paddr + decrypted_base,
-						paddr + PMD_FLAGS);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start;
+	ppd.vaddr_end = workarea_end;
+	sme_map_range_decrypted(&ppd);
+
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start + decrypted_base;
+	ppd.vaddr_end = workarea_end + decrypted_base;
+	sme_map_range_decrypted(&ppd);
 
 	/* Perform the encryption */
 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
-			    kernel_len, workarea_start, (unsigned long)pgd);
+			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
+
+	if (initrd_len)
+		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
+				    initrd_len, workarea_start,
+				    (unsigned long)ppd.pgd);
 
 	/*
 	 * At this point we are running encrypted. Remove the mappings for
 	 * the decrypted areas - all that is needed for this is to remove
 	 * the PGD entry/entries.
 	 */
-	sme_clear_pgd(pgd, kernel_start + decrypted_base,
-		      kernel_end + decrypted_base);
+	ppd.vaddr = kernel_start + decrypted_base;
+	ppd.vaddr_end = kernel_end + decrypted_base;
+	sme_clear_pgd(&ppd);
 
-	sme_clear_pgd(pgd, workarea_start + decrypted_base,
-		      workarea_end + decrypted_base);
+	if (initrd_len) {
+		ppd.vaddr = initrd_start + decrypted_base;
+		ppd.vaddr_end = initrd_end + decrypted_base;
+		sme_clear_pgd(&ppd);
+	}
+
+	ppd.vaddr = workarea_start + decrypted_base;
+	ppd.vaddr_end = workarea_end + decrypted_base;
+	sme_clear_pgd(&ppd);
 
 	/* Flush the TLB - no globals so cr3 is enough */
 	native_write_cr3(__native_read_cr3());
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
 
 	/*
 	 * Entry parameters:
-	 *   RDI - virtual address for the encrypted kernel mapping
-	 *   RSI - virtual address for the decrypted kernel mapping
-	 *   RDX - length of kernel
+	 *   RDI - virtual address for the encrypted mapping
+	 *   RSI - virtual address for the decrypted mapping
+	 *   RDX - length to encrypt
 	 *   RCX - virtual address of the encryption workarea, including:
 	 *     - stack page (PAGE_SIZE)
 	 *     - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
 	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */
 
 	push	%r12
-	movq	%rdi, %r10		/* Encrypted kernel */
-	movq	%rsi, %r11		/* Decrypted kernel */
-	movq	%rdx, %r12		/* Kernel length */
+	movq	%rdi, %r10		/* Encrypted area */
+	movq	%rsi, %r11		/* Decrypted area */
+	movq	%rdx, %r12		/* Area length */
 
 	/* Copy encryption routine into the workarea */
 	movq	%rax, %rdi		/* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
 	rep	movsb
 
 	/* Setup registers for call */
-	movq	%r10, %rdi		/* Encrypted kernel */
-	movq	%r11, %rsi		/* Decrypted kernel */
+	movq	%r10, %rdi		/* Encrypted area */
+	movq	%r11, %rsi		/* Decrypted area */
 	movq	%r8, %rdx		/* Pagetables used for encryption */
-	movq	%r12, %rcx		/* Kernel length */
+	movq	%r12, %rcx		/* Area length */
 	movq	%rax, %r8		/* Workarea encryption routine */
 	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */
 
@@ -71,7 +71,7 @@ ENDPROC(sme_encrypt_execute)
 
 ENTRY(__enc_copy)
 /*
- * Routine used to encrypt kernel.
+ * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
@@ -79,19 +79,19 @@ ENTRY(__enc_copy)
 *   during execution.
 *
 *   On entry the registers must be:
- *     RDI - virtual address for the encrypted kernel mapping
- *     RSI - virtual address for the decrypted kernel mapping
+ *     RDI - virtual address for the encrypted mapping
+ *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
- *     RCX - length of kernel
+ *     RCX - length of area
 *     R8  - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
- * The kernel will be encrypted by copying from the non-encrypted
- * kernel space to an intermediate buffer and then copying from the
- * intermediate buffer back to the encrypted kernel space. The physical
- * addresses of the two kernel space mappings are the same which
- * results in the kernel being encrypted "in place".
+ * The area will be encrypted by copying from the non-encrypted
+ * memory space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted memory space. The physical
+ * addresses of the two mappings are the same which results in the area
+ * being encrypted "in place".
 */
 	/* Enable the new page tables */
 	mov	%rdx, %cr3
@ -103,47 +103,55 @@ ENTRY(__enc_copy)
|
||||||
orq $X86_CR4_PGE, %rdx
|
orq $X86_CR4_PGE, %rdx
|
||||||
mov %rdx, %cr4
|
mov %rdx, %cr4
|
||||||
|
|
||||||
|
push %r15
|
||||||
|
push %r12
|
||||||
|
|
||||||
|
movq %rcx, %r9 /* Save area length */
|
||||||
|
movq %rdi, %r10 /* Save encrypted area address */
|
||||||
|
movq %rsi, %r11 /* Save decrypted area address */
|
||||||
|
|
||||||
/* Set the PAT register PA5 entry to write-protect */
|
/* Set the PAT register PA5 entry to write-protect */
|
||||||
push %rcx
|
|
||||||
movl $MSR_IA32_CR_PAT, %ecx
|
movl $MSR_IA32_CR_PAT, %ecx
|
||||||
rdmsr
|
rdmsr
|
||||||
push %rdx /* Save original PAT value */
|
mov %rdx, %r15 /* Save original PAT value */
|
||||||
andl $0xffff00ff, %edx /* Clear PA5 */
|
andl $0xffff00ff, %edx /* Clear PA5 */
|
||||||
orl $0x00000500, %edx /* Set PA5 to WP */
|
orl $0x00000500, %edx /* Set PA5 to WP */
|
||||||
wrmsr
|
wrmsr
|
||||||
pop %rdx /* RDX contains original PAT value */
|
|
||||||
pop %rcx
|
|
||||||
|
|
||||||
movq %rcx, %r9 /* Save kernel length */
|
|
||||||
movq %rdi, %r10 /* Save encrypted kernel address */
|
|
||||||
movq %rsi, %r11 /* Save decrypted kernel address */
|
|
||||||
|
|
||||||
wbinvd /* Invalidate any cache entries */
|
wbinvd /* Invalidate any cache entries */
|
||||||
|
|
||||||
/* Copy/encrypt 2MB at a time */
|
/* Copy/encrypt up to 2MB at a time */
|
||||||
|
movq $PMD_PAGE_SIZE, %r12
|
||||||
1:
|
1:
|
||||||
movq %r11, %rsi /* Source - decrypted kernel */
|
cmpq %r12, %r9
|
||||||
|
jnb 2f
|
||||||
|
movq %r9, %r12
|
||||||
|
|
||||||
|
2:
|
||||||
|
movq %r11, %rsi /* Source - decrypted area */
|
||||||
movq %r8, %rdi /* Dest - intermediate copy buffer */
|
movq %r8, %rdi /* Dest - intermediate copy buffer */
|
||||||
movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
|
movq %r12, %rcx
|
||||||
rep movsb
|
rep movsb
|
||||||
|
|
||||||
movq %r8, %rsi /* Source - intermediate copy buffer */
|
movq %r8, %rsi /* Source - intermediate copy buffer */
|
||||||
movq %r10, %rdi /* Dest - encrypted kernel */
|
movq %r10, %rdi /* Dest - encrypted area */
|
||||||
movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
|
movq %r12, %rcx
|
||||||
rep movsb
|
rep movsb
|
||||||
|
|
||||||
addq $PMD_PAGE_SIZE, %r11
|
addq %r12, %r11
|
||||||
addq $PMD_PAGE_SIZE, %r10
|
addq %r12, %r10
|
||||||
subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */
|
subq %r12, %r9 /* Kernel length decrement */
|
||||||
jnz 1b /* Kernel length not zero? */
|
jnz 1b /* Kernel length not zero? */
|
||||||
|
|
||||||
/* Restore PAT register */
|
/* Restore PAT register */
|
||||||
push %rdx /* Save original PAT value */
|
|
||||||
movl $MSR_IA32_CR_PAT, %ecx
|
movl $MSR_IA32_CR_PAT, %ecx
|
||||||
rdmsr
|
rdmsr
|
||||||
pop %rdx /* Restore original PAT value */
|
mov %r15, %rdx /* Restore original PAT value */
|
||||||
wrmsr
|
wrmsr
|
||||||
|
|
||||||
|
pop %r12
|
||||||
|
pop %r15
|
||||||
|
|
||||||
ret
|
ret
|
||||||
.L__enc_copy_end:
|
.L__enc_copy_end:
|
||||||
ENDPROC(__enc_copy)
|
ENDPROC(__enc_copy)
|
||||||
|
|
|
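For readers who do not speak x86 assembly, the new copy loop is just a clamped chunk copy. Below is a minimal userspace C sketch of the same logic; CHUNK and copy_in_chunks are illustrative names, not from the kernel:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK (2UL * 1024 * 1024)  /* stands in for PMD_PAGE_SIZE */

    /* Copy src to dst through a bounce buffer, at most CHUNK bytes per
     * pass; the final pass is clamped to what is left, which is exactly
     * what the added cmpq/jnb/movq sequence does in __enc_copy. */
    static void copy_in_chunks(char *dst, const char *src, char *bounce,
                               size_t len)
    {
        while (len > 0) {
            size_t n = len < CHUNK ? len : CHUNK;

            memcpy(bounce, src, n);   /* decrypted area -> buffer */
            memcpy(dst, bounce, n);   /* buffer -> encrypted area */
            src += n;
            dst += n;
            len -= n;
        }
    }

    int main(void)
    {
        static char a[5000] = "hello", b[5000], bounce[5000];

        copy_in_chunks(b, a, bounce, sizeof(a));
        printf("%s\n", b);  /* hello */
        return 0;
    }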
@@ -4439,6 +4439,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
          * https://bugzilla.kernel.org/show_bug.cgi?id=121671
          */
        { "LITEON CX1-JB*-HP",  NULL,   ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON EP1-*",       NULL,   ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
@@ -174,6 +174,7 @@ gf119_sor = {
                .links = gf119_sor_dp_links,
                .power = g94_sor_dp_power,
                .pattern = gf119_sor_dp_pattern,
+               .drive = gf119_sor_dp_drive,
                .vcpi = gf119_sor_dp_vcpi,
                .audio = gf119_sor_dp_audio,
                .audio_sym = gf119_sor_dp_audio_sym,

@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_ldu_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };

@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
        .set_property = vmw_du_connector_set_property,
        .destroy = vmw_sou_connector_destroy,
        .reset = vmw_du_connector_reset,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+       .atomic_destroy_state = vmw_du_connector_destroy_state,
        .atomic_set_property = vmw_du_connector_atomic_set_property,
        .atomic_get_property = vmw_du_connector_atomic_get_property,
 };

@@ -396,16 +396,17 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                   the underlying bus driver */
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
+               if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+                       dev_err(&adapter->dev, "Invalid block %s size %d\n",
+                               read_write == I2C_SMBUS_READ ? "read" : "write",
+                               data->block[0]);
+                       return -EINVAL;
+               }
+
                if (read_write == I2C_SMBUS_READ) {
                        msg[1].len = data->block[0];
                } else {
                        msg[0].len = data->block[0] + 1;
-                       if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
-                               dev_err(&adapter->dev,
-                                       "Invalid block write size %d\n",
-                                       data->block[0]);
-                               return -EINVAL;
-                       }
                        for (i = 1; i <= data->block[0]; i++)
                                msgbuf0[i] = data->block[i];
                }

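The point of the hoisted check is that an oversized block length is now rejected for reads and writes alike, before any message buffer is sized from it. A hedged userspace sketch of that shape (the helper below is illustrative, not the kernel function):

    #include <stdio.h>

    #define I2C_SMBUS_BLOCK_MAX 32  /* as in the i2c uapi headers */

    /* Validate the caller-supplied block length up front, for both
     * directions, mirroring the check the patch moves ahead of the
     * read/write branches. */
    static int check_block_len(int len, int is_read)
    {
        if (len > I2C_SMBUS_BLOCK_MAX) {
            fprintf(stderr, "Invalid block %s size %d\n",
                    is_read ? "read" : "write", len);
            return -1;  /* -EINVAL in the kernel */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_block_len(40, 1));  /* rejected */
        printf("%d\n", check_block_len(16, 0));  /* accepted */
        return 0;
    }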
@@ -881,11 +881,11 @@ static int complete_subctxt(struct hfi1_filedata *fd)
        }
 
        if (ret) {
-               hfi1_rcd_put(fd->uctxt);
-               fd->uctxt = NULL;
                spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
                __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
                spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+               hfi1_rcd_put(fd->uctxt);
+               fd->uctxt = NULL;
        }
 
        return ret;

@@ -4303,12 +4303,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 
        memset(ah_attr, 0, sizeof(*ah_attr));
 
-       ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
-       rdma_ah_set_port_num(ah_attr, path->port);
-       if (rdma_ah_get_port_num(ah_attr) == 0 ||
-           rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports))
+       if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
                return;
 
+       ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+
        rdma_ah_set_port_num(ah_attr, path->port);
        rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
 
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
 {
        struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+       ib_drain_qp(isert_conn->qp);
        list_del_init(&isert_conn->node);
        isert_conn->cm_id = NULL;
        isert_put_conn(isert_conn);

@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
                         twl4030_vibra_suspend, twl4030_vibra_resume);
 
 static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
-                             struct device_node *node)
+                             struct device_node *parent)
 {
+       struct device_node *node;
+
        if (pdata && pdata->coexist)
                return true;
 
-       node = of_find_node_by_name(node, "codec");
+       node = of_get_child_by_name(parent, "codec");
        if (node) {
                of_node_put(node);
                return true;

@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        int vddvibr_uV = 0;
        int error;
 
-       of_node_get(twl6040_core_dev->of_node);
-       twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
+       twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
                                                 "vibra");
        if (!twl6040_core_node) {
                dev_err(&pdev->dev, "parent of node is missing?\n");

@@ -1250,29 +1250,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
        case SS4_PACKET_ID_MULTI:
                if (priv->flags & ALPS_BUTTONPAD) {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
                        } else {
                                f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
                                f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX_BL;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY_BL;
 
                        f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX_BL;
-                       no_data_y = SS4_MFPACKET_NO_AY_BL;
                } else {
                        if (IS_SS4PLUS_DEV(priv->dev_id)) {
-                               f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_PLUS_MFPACKET_NO_AX;
                        } else {
-                               f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
-                               f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+                               f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+                               f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+                               no_data_x = SS4_MFPACKET_NO_AX;
                        }
+                       no_data_y = SS4_MFPACKET_NO_AY;
 
                        f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
                        f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
-                       no_data_x = SS4_MFPACKET_NO_AX;
-                       no_data_y = SS4_MFPACKET_NO_AY;
                }
 
                f->first_mp = 0;

@@ -141,10 +141,12 @@ enum SS4_PACKET_ID {
 #define SS4_TS_Z_V2(_b)        (s8)(_b[4] & 0x7F)
 
 
-#define SS4_MFPACKET_NO_AX     8160    /* X-Coordinate value */
-#define SS4_MFPACKET_NO_AY     4080    /* Y-Coordinate value */
-#define SS4_MFPACKET_NO_AX_BL  8176    /* Buttonless X-Coordinate value */
-#define SS4_MFPACKET_NO_AY_BL  4088    /* Buttonless Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX          8160 /* X-Coordinate value */
+#define SS4_MFPACKET_NO_AY          4080 /* Y-Coordinate value */
+#define SS4_MFPACKET_NO_AX_BL       8176 /* Buttonless X-Coord value */
+#define SS4_MFPACKET_NO_AY_BL       4088 /* Buttonless Y-Coord value */
+#define SS4_PLUS_MFPACKET_NO_AX     4080 /* SS4 PLUS, X */
+#define SS4_PLUS_MFPACKET_NO_AX_BL  4088 /* Buttonless SS4 PLUS, X */
 
 /*
  * enum V7_PACKET_ID - defines the packet type for V7

@@ -230,8 +230,10 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
                rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
                        "Failed to process interrupt request: %d\n", ret);
 
-       if (count)
+       if (count) {
                kfree(attn_data.data);
+               attn_data.data = NULL;
+       }
 
        if (!kfifo_is_empty(&drvdata->attn_fifo))
                return rmi_irq_fn(irq, dev_id);

@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        int data, n, ret;
        if (!np)
                return -ENODEV;
-       np = of_find_node_by_name(np, "touch");
+       np = of_get_child_by_name(np, "touch");
        if (!np) {
                dev_err(&pdev->dev, "Can't find touch node\n");
                return -EINVAL;
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set tsi prebias time */
        if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
                ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        /* set prebias & prechg time of pen detect */
        data = 0;
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
        if (data) {
                ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
                if (ret < 0)
-                       return -EINVAL;
+                       goto err_put_node;
        }
        of_property_read_u32(np, "marvell,88pm860x-resistor-X", &res_x);
 
+       of_node_put(np);
+
        return 0;
+
+err_put_node:
+       of_node_put(np);
+
+       return -EINVAL;
 }
 #else
 #define pm860x_touch_dt_init(x, y, z)  (-1)

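The error-path rework above is a classic refcount pattern: once the child node reference is held, every exit must drop it exactly once, so failures funnel through a single label. A self-contained C sketch of the shape, using malloc/free to stand in for of_get_child_by_name()/of_node_put() (names below are made up for illustration):

    #include <stdlib.h>
    #include <stdio.h>

    static int write_reg(int reg, int val)
    {
        (void)reg;
        return val < 0 ? -1 : 0;  /* pretend negative values fail */
    }

    /* After the node is acquired, every exit path releases it exactly
     * once - the err_put_node label the patch introduces. */
    static int dt_init(int a, int b)
    {
        char *node = malloc(1);  /* stands in for the child node ref */
        if (!node)
            return -1;

        if (write_reg(0, a) < 0)
            goto err_put_node;
        if (write_reg(1, b) < 0)
            goto err_put_node;

        free(node);              /* of_node_put(np) on success */
        return 0;

    err_put_node:
        free(node);              /* of_node_put(np) on failure */
        return -1;
    }

    int main(void)
    {
        printf("%d %d\n", dt_init(1, 2), dt_init(-1, 2));  /* 0 -1 */
        return 0;
    }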
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
        /* Ignore extra keys (which are used for IV etc) */
        subkey_size = crypt_subkey_size(cc);
 
-       if (crypt_integrity_hmac(cc))
+       if (crypt_integrity_hmac(cc)) {
+               if (subkey_size < cc->key_mac_size)
+                       return -EINVAL;
+
                crypt_copy_authenckey(cc->authenc_key, cc->key,
                                      subkey_size - cc->key_mac_size,
                                      cc->key_mac_size);
+       }
 
        for (i = 0; i < cc->tfms_count; i++) {
                if (crypt_integrity_hmac(cc))
                        r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        ret = crypt_setkey(cc);
 
-       /* wipe the kernel key payload copy in each case */
-       memset(cc->key, 0, cc->key_size * sizeof(u8));
-
        if (!ret) {
                set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
                kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
                }
        }
 
+       /* wipe the kernel key payload copy */
+       if (cc->key_string)
+               memset(cc->key, 0, cc->key_size * sizeof(u8));
+
        return ret;
 }
 
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                        cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                if (!cc->tag_pool) {
                        ti->error = "Cannot allocate integrity tags mempool";
+                       ret = -ENOMEM;
                        goto bad;
                }
 
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
                                return ret;
                        if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                                ret = cc->iv_gen_ops->init(cc);
+                       /* wipe the kernel key payload copy */
+                       if (cc->key_string)
+                               memset(cc->key, 0, cc->key_size * sizeof(u8));
                        return ret;
                }
                if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 18, 0},
+       .version = {1, 18, 1},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,

@@ -2558,7 +2558,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
        int r = 0;
        unsigned i;
        __u64 journal_pages, journal_desc_size, journal_tree_size;
-       unsigned char *crypt_data = NULL;
+       unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+       struct skcipher_request *req = NULL;
 
        ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
        ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2616,9 +2617,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
 
        if (blocksize == 1) {
                struct scatterlist *sg;
-               SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-               unsigned char iv[ivsize];
-               skcipher_request_set_tfm(req, ic->journal_crypt);
+
+               req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+               if (!req) {
+                       *error = "Could not allocate crypt request";
+                       r = -ENOMEM;
+                       goto bad;
+               }
+
+               crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+               if (!crypt_iv) {
+                       *error = "Could not allocate iv";
+                       r = -ENOMEM;
+                       goto bad;
+               }
 
                ic->journal_xor = dm_integrity_alloc_page_list(ic);
                if (!ic->journal_xor) {
@@ -2640,9 +2652,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        sg_set_buf(&sg[i], va, PAGE_SIZE);
                }
                sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-               memset(iv, 0x00, ivsize);
+               memset(crypt_iv, 0x00, ivsize);
 
-               skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+               skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                init_completion(&comp.comp);
                comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (do_crypt(true, req, &comp))
@@ -2658,10 +2670,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                crypto_free_skcipher(ic->journal_crypt);
                ic->journal_crypt = NULL;
        } else {
-               SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-               unsigned char iv[ivsize];
                unsigned crypt_len = roundup(ivsize, blocksize);
 
+               req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+               if (!req) {
+                       *error = "Could not allocate crypt request";
+                       r = -ENOMEM;
+                       goto bad;
+               }
+
+               crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+               if (!crypt_iv) {
+                       *error = "Could not allocate iv";
+                       r = -ENOMEM;
+                       goto bad;
+               }
+
                crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                if (!crypt_data) {
                        *error = "Unable to allocate crypt data";
@@ -2669,8 +2693,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        goto bad;
                }
 
-               skcipher_request_set_tfm(req, ic->journal_crypt);
-
                ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                if (!ic->journal_scatterlist) {
                        *error = "Unable to allocate sg list";
@@ -2694,12 +2716,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
                        struct skcipher_request *section_req;
                        __u32 section_le = cpu_to_le32(i);
 
-                       memset(iv, 0x00, ivsize);
+                       memset(crypt_iv, 0x00, ivsize);
                        memset(crypt_data, 0x00, crypt_len);
                        memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
 
                        sg_init_one(&sg, crypt_data, crypt_len);
-                       skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                       skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                        init_completion(&comp.comp);
                        comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                        if (do_crypt(true, req, &comp))
@@ -2757,6 +2779,9 @@ retest_commit_id:
        }
 bad:
        kfree(crypt_data);
+       kfree(crypt_iv);
+       skcipher_request_free(req);
 
        return r;
 }

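The dm-integrity change replaces a runtime-sized stack array (and an on-stack cipher request) with heap allocations that are released on the shared error path. A minimal userspace sketch of the buffer half of that pattern, assuming nothing beyond libc:

    #include <stdlib.h>
    #include <string.h>

    /* Before: "unsigned char iv[ivsize];" lived on the stack with a
     * size only known at runtime. After: the buffer comes from the
     * heap (kmalloc in the kernel) and is freed on the common exit. */
    static int use_iv(size_t ivsize)
    {
        unsigned char *iv = malloc(ivsize);  /* kmalloc(ivsize, GFP_KERNEL) */
        if (!iv)
            return -1;                       /* -ENOMEM, goto bad */

        memset(iv, 0x00, ivsize);
        /* ... hand iv to the cipher request here ... */

        free(iv);                            /* kfree(crypt_iv) at bad: */
        return 0;
    }

    int main(void)
    {
        return use_iv(16);
    }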
@@ -80,10 +80,14 @@
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128

@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
        pn->keys[1] = rn->keys[0];
        memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-       /*
-        * rejig the spine. This is ugly, since it knows too
-        * much about the spine
-        */
-       if (s->nodes[0] != new_parent) {
-               unlock_block(s->info, s->nodes[0]);
-               s->nodes[0] = new_parent;
-       }
-       if (key < le64_to_cpu(rn->keys[0])) {
-               unlock_block(s->info, right);
-               s->nodes[1] = left;
-       } else {
-               unlock_block(s->info, left);
-               s->nodes[1] = right;
-       }
-       s->count = 2;
+       unlock_block(s->info, left);
+       unlock_block(s->info, right);
 
        return 0;
 }

@@ -687,6 +687,20 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
                return;
        }
 
+       /* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
+       if (is_imx53_esdhc(imx_data)) {
+               /*
+                * According to the i.MX53 reference manual, if DLLCTRL[10] can
+                * be set, then the controller is eSDHCv3, else it is eSDHCv2.
+                */
+               val = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
+               temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
+               writel(val, host->ioaddr + ESDHC_DLL_CTRL);
+               if (temp & BIT(10))
+                       pre_div = 2;
+       }
+
        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
                | ESDHC_CLOCK_MASK);

@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
        int err = 0;
        u8 *packet_ptr;
-       int i, n = 1, packet_len;
+       int packet_len;
        ptrdiff_t cmd_len;
 
        /* usb device unregistered? */
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
        }
 
        packet_ptr = cmd_head;
+       packet_len = cmd_len;
 
        /* firmware is not able to re-assemble 512 bytes buffer in full-speed */
-       if ((dev->udev->speed != USB_SPEED_HIGH) &&
-           (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
-               packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
-               n += cmd_len / packet_len;
-       } else {
-               packet_len = cmd_len;
-       }
+       if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
+               packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
 
-       for (i = 0; i < n; i++) {
+       do {
                err = usb_bulk_msg(dev->udev,
                                   usb_sndbulkpipe(dev->udev,
                                                   PCAN_USBPRO_EP_CMDOUT),
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
                }
 
                packet_ptr += packet_len;
-       }
+               cmd_len -= packet_len;
+
+               if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
+                       packet_len = cmd_len;
+
+       } while (packet_len > 0);
 
        return err;
 }

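The rewritten send loop sizes each USB packet from what is actually left, so a command that is not a whole multiple of the packet size no longer produces a miscounted tail. A hedged userspace sketch of the same control flow (printf stands in for usb_bulk_msg; MAX_PKT is an illustrative constant):

    #include <stdio.h>

    #define MAX_PKT 64  /* stands in for PCAN_UFD_LOSPD_PKT_SIZE */

    /* Fragment cmd_len bytes into packets; on full-speed links the
     * packet size is clamped, and the final packet carries whatever
     * remains - the shape of the patched do/while loop. */
    static int send_cmd(const char *buf, long cmd_len, int full_speed)
    {
        long packet_len = cmd_len;

        if (full_speed && packet_len > MAX_PKT)
            packet_len = MAX_PKT;

        do {
            printf("bulk msg of %ld bytes\n", packet_len);
            buf += packet_len;
            cmd_len -= packet_len;
            if (cmd_len < MAX_PKT)
                packet_len = cmd_len;
        } while (packet_len > 0);

        return 0;
    }

    int main(void)
    {
        char buf[150] = { 0 };

        send_cmd(buf, sizeof(buf), 1);  /* full-speed: 64 + 64 + 22 */
        send_cmd(buf, sizeof(buf), 0);  /* high-speed: one 150-byte pass */
        return 0;
    }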
@@ -4552,11 +4552,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val |= MVPP2_GMAC_DISABLE_PADDING;
-               val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
                val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
                val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4564,10 +4559,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                      MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                val &= ~MVPP22_CTRL4_DP_CLK_SEL;
                writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-               val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-               val &= ~MVPP2_GMAC_DISABLE_PADDING;
-               writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
        }
 
        /* The port is connected to a copper PHY */

@@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
        if (ret)
                return ERR_PTR(-ENODEV);
 
+       /* This phy type handled by the usb-phy subsystem for now */
+       if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
+               return ERR_PTR(-ENODEV);
+
        mutex_lock(&phy_provider_mutex);
        phy_provider = of_phy_provider_lookup(args.np);
        if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {

@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
 
 int sas_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       int res;
+       int res = TMF_RESP_FUNC_FAILED;
        struct sas_task *task = TO_SAS_TASK(cmd);
        struct Scsi_Host *host = cmd->device->host;
+       struct domain_device *dev = cmd_to_domain_dev(cmd);
        struct sas_internal *i = to_sas_internal(host->transportt);
+       unsigned long flags;
 
        if (!i->dft->lldd_abort_task)
                return FAILED;
 
-       res = i->dft->lldd_abort_task(task);
+       spin_lock_irqsave(host->host_lock, flags);
+       /* We cannot do async aborts for SATA devices */
+       if (dev_is_sata(dev) && !host->host_eh_scheduled) {
+               spin_unlock_irqrestore(host->host_lock, flags);
+               return FAILED;
+       }
+       spin_unlock_irqrestore(host->host_lock, flags);
+
+       if (task)
+               res = i->dft->lldd_abort_task(task);
+       else
+               SAS_DPRINTK("no task to abort\n");
        if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
                return SUCCESS;
 
@@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = {
 
 /*
  * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
+ * of pages. Returns 0 on error.
  */
 static inline unsigned int round_pipe_size(unsigned int size)
 {
        unsigned long nr_pages;
 
+       if (size < pipe_min_size)
+               size = pipe_min_size;
+
        nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (nr_pages == 0)
+               return 0;
+
        return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
 }
 
@@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
        long ret = 0;
 
        size = round_pipe_size(arg);
+       if (size == 0)
+               return -EINVAL;
        nr_pages = size >> PAGE_SHIFT;
 
        if (!nr_pages)
@@ -1123,13 +1131,18 @@ out_revert_acct:
 int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
                 size_t *lenp, loff_t *ppos)
 {
+       unsigned int rounded_pipe_max_size;
        int ret;
 
        ret = proc_douintvec_minmax(table, write, buf, lenp, ppos);
        if (ret < 0 || !write)
                return ret;
 
-       pipe_max_size = round_pipe_size(pipe_max_size);
+       rounded_pipe_max_size = round_pipe_size(pipe_max_size);
+       if (rounded_pipe_max_size == 0)
+               return -EINVAL;
+
+       pipe_max_size = rounded_pipe_max_size;
        return ret;
 }

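Why the new "Returns 0 on error" contract matters: on a kernel where unsigned long is 32 bits, a size near UINT_MAX makes size + PAGE_SIZE - 1 wrap, so the page count comes out as zero; the patched helper reports that instead of handing back a bogus rounded size. A sketch of that arithmetic with 32-bit types forced, assuming a 4 KiB page (a model, not the kernel function):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  ((uint32_t)1 << PAGE_SHIFT)

    static uint32_t round_pipe_size_sketch(uint32_t size)
    {
        uint32_t nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (nr_pages == 0)
            return 0;              /* caller turns this into -EINVAL */

        /* round up to a power of two, then back to bytes */
        uint32_t p = 1;
        while (p < nr_pages)
            p <<= 1;
        return p << PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%u\n", round_pipe_size_sketch(4096));         /* 4096 */
        printf("%u\n", round_pipe_size_sketch(0xFFFFFFFFu));  /* 0: rejected */
        return 0;
    }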
@@ -424,8 +424,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                 * safe because the task has stopped executing permanently.
                 */
                if (permitted && (task->flags & PF_DUMPCORE)) {
-                       eip = KSTK_EIP(task);
-                       esp = KSTK_ESP(task);
+                       if (try_get_task_stack(task)) {
+                               eip = KSTK_EIP(task);
+                               esp = KSTK_ESP(task);
+                               put_task_stack(task);
+                       }
                }
        }
 
@@ -71,7 +71,7 @@ extern void delayacct_init(void);
 extern void __delayacct_tsk_init(struct task_struct *);
 extern void __delayacct_tsk_exit(struct task_struct *);
 extern void __delayacct_blkio_start(void);
-extern void __delayacct_blkio_end(void);
+extern void __delayacct_blkio_end(struct task_struct *);
 extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
 extern __u64 __delayacct_blkio_ticks(struct task_struct *);
 extern void __delayacct_freepages_start(void);
@@ -122,10 +122,10 @@ static inline void delayacct_blkio_start(void)
                __delayacct_blkio_start();
 }
 
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
 {
        if (current->delays)
-               __delayacct_blkio_end();
+               __delayacct_blkio_end(p);
        delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
 
@@ -169,7 +169,7 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
 {}
 static inline void delayacct_blkio_start(void)
 {}
-static inline void delayacct_blkio_end(void)
+static inline void delayacct_blkio_end(struct task_struct *p)
 {}
 static inline int delayacct_add_tsk(struct taskstats *d,
                                    struct task_struct *tsk)

@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
        return false;
 }
 
+static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
        return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
 }
 
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return swp_offset(entry);
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
 {
        return 0;
 }
+
+static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+{
+       return 0;
+}
+
 static inline struct page *migration_entry_to_page(swp_entry_t entry)
 {
        return NULL;

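The new _to_pfn() helpers exist so that a caller (check_pte() later in this commit) can reduce every case - present PTE, migration entry, device-private entry - to a page frame number and do one comparison, instead of pointer arithmetic on struct page. A simplified standalone model of a swap entry, with the type stored in the high bits and the offset (the pfn) below; the bit layout here is illustrative, not the kernel's exact encoding:

    #include <stdio.h>
    #include <stdint.h>

    #define SWP_TYPE_SHIFT 58  /* illustrative split, not the real one */

    static uint64_t swp_offset(uint64_t entry)
    {
        return entry & ((1ULL << SWP_TYPE_SHIFT) - 1);
    }

    /* Mirrors the new inline helper: the offset of a migration entry
     * is the pfn of the page being migrated. */
    static uint64_t migration_entry_to_pfn(uint64_t entry)
    {
        return swp_offset(entry);
    }

    int main(void)
    {
        uint64_t entry = (1ULL << SWP_TYPE_SHIFT) | 0x1234;
        printf("pfn=0x%llx\n",
               (unsigned long long)migration_entry_to_pfn(entry));
        return 0;
    }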
@@ -31,11 +31,17 @@
 #else
 #define MODULE_RANDSTRUCT_PLUGIN
 #endif
 
+#ifdef RETPOLINE
+#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+#else
+#define MODULE_VERMAGIC_RETPOLINE ""
+#endif
+
 #define VERMAGIC_STRING \
        UTS_RELEASE " " \
        MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
        MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
        MODULE_ARCH_VERMAGIC \
-       MODULE_RANDSTRUCT_PLUGIN
+       MODULE_RANDSTRUCT_PLUGIN \
+       MODULE_VERMAGIC_RETPOLINE

@@ -51,16 +51,16 @@ void __delayacct_tsk_init(struct task_struct *tsk)
  * Finish delay accounting for a statistic using its timestamps (@start),
  * accumalator (@total) and @count
  */
-static void delayacct_end(u64 *start, u64 *total, u32 *count)
+static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count)
 {
        s64 ns = ktime_get_ns() - *start;
        unsigned long flags;
 
        if (ns > 0) {
-               spin_lock_irqsave(&current->delays->lock, flags);
+               spin_lock_irqsave(lock, flags);
                *total += ns;
                (*count)++;
-               spin_unlock_irqrestore(&current->delays->lock, flags);
+               spin_unlock_irqrestore(lock, flags);
        }
 }
 
@@ -69,17 +69,25 @@ void __delayacct_blkio_start(void)
        current->delays->blkio_start = ktime_get_ns();
 }
 
-void __delayacct_blkio_end(void)
+/*
+ * We cannot rely on the `current` macro, as we haven't yet switched back to
+ * the process being woken.
+ */
+void __delayacct_blkio_end(struct task_struct *p)
 {
-       if (current->delays->flags & DELAYACCT_PF_SWAPIN)
-               /* Swapin block I/O */
-               delayacct_end(&current->delays->blkio_start,
-                       &current->delays->swapin_delay,
-                       &current->delays->swapin_count);
-       else    /* Other block I/O */
-               delayacct_end(&current->delays->blkio_start,
-                       &current->delays->blkio_delay,
-                       &current->delays->blkio_count);
+       struct task_delay_info *delays = p->delays;
+       u64 *total;
+       u32 *count;
+
+       if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
+               total = &delays->swapin_delay;
+               count = &delays->swapin_count;
+       } else {
+               total = &delays->blkio_delay;
+               count = &delays->blkio_count;
+       }
+
+       delayacct_end(&delays->lock, &delays->blkio_start, total, count);
 }
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -153,8 +161,10 @@ void __delayacct_freepages_start(void)
 
 void __delayacct_freepages_end(void)
 {
-       delayacct_end(&current->delays->freepages_start,
-                       &current->delays->freepages_delay,
-                       &current->delays->freepages_count);
+       delayacct_end(
+               &current->delays->lock,
+               &current->delays->freepages_start,
+               &current->delays->freepages_delay,
+               &current->delays->freepages_count);
 }

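The key move in the delayacct rework is making the lock a parameter: the wakeup path runs before the context switch, so `current` is the waker, not the task whose I/O delay is being closed out. A pthread-based sketch of the reshaped helper, assuming nothing kernel-specific (the kernel uses a spinlock and ktime deltas):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdint.h>

    /* The lock to take now comes from the caller, so it can belong to
     * the woken task's delay struct rather than to current's. */
    static void delayacct_end_sketch(pthread_mutex_t *lock, int64_t ns,
                                     uint64_t *total, uint32_t *count)
    {
        if (ns > 0) {
            pthread_mutex_lock(lock);
            *total += ns;
            (*count)++;
            pthread_mutex_unlock(lock);
        }
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        uint64_t total = 0;
        uint32_t count = 0;

        delayacct_end_sketch(&lock, 1000, &total, &count);
        printf("%llu %u\n", (unsigned long long)total, count);
        return 0;
    }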
@@ -1878,6 +1878,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        struct futex_q *this, *next;
        DEFINE_WAKE_Q(wake_q);
 
+       if (nr_wake < 0 || nr_requeue < 0)
+               return -EINVAL;
+
        /*
         * When PI not supported: return -ENOSYS if requeue_pi is true,
         * consequently the compiler knows requeue_pi is always false past
@@ -2294,21 +2297,17 @@ static void unqueue_me_pi(struct futex_q *q)
        spin_unlock(q->lock_ptr);
 }
 
-/*
- * Fixup the pi_state owner with the new owner.
- *
- * Must be called with hash bucket lock held and mm->sem held for non
- * private futexes.
- */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *newowner)
+                               struct task_struct *argowner)
 {
-       u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        u32 uval, uninitialized_var(curval), newval;
-       struct task_struct *oldowner;
+       struct task_struct *oldowner, *newowner;
+       u32 newtid;
        int ret;
 
+       lockdep_assert_held(q->lock_ptr);
+
        raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 
        oldowner = pi_state->owner;
@@ -2317,11 +2316,17 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                newtid |= FUTEX_OWNER_DIED;
 
        /*
-        * We are here either because we stole the rtmutex from the
-        * previous highest priority waiter or we are the highest priority
-        * waiter but have failed to get the rtmutex the first time.
+        * We are here because either:
         *
-        * We have to replace the newowner TID in the user space variable.
+        *  - we stole the lock and pi_state->owner needs updating to reflect
+        *    that (@argowner == current),
+        *
+        * or:
+        *
+        *  - someone stole our lock and we need to fix things to point to the
+        *    new owner (@argowner == NULL).
+        *
+        * Either way, we have to replace the TID in the user space variable.
         * This must be atomic as we have to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
@@ -2334,6 +2339,42 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
         * in the PID check in lookup_pi_state.
         */
 retry:
+       if (!argowner) {
+               if (oldowner != current) {
+                       /*
+                        * We raced against a concurrent self; things are
+                        * already fixed up. Nothing to do.
+                        */
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
+                       /* We got the lock after all, nothing to fix. */
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               /*
+                * Since we just failed the trylock; there must be an owner.
+                */
+               newowner = rt_mutex_owner(&pi_state->pi_mutex);
+               BUG_ON(!newowner);
+       } else {
+               WARN_ON_ONCE(argowner != current);
+               if (oldowner == current) {
+                       /*
+                        * We raced against a concurrent self; things are
+                        * already fixed up. Nothing to do.
+                        */
+                       ret = 0;
+                       goto out_unlock;
+               }
+               newowner = argowner;
+       }
+
+       newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+
        if (get_futex_value_locked(&uval, uaddr))
                goto handle_fault;
 
@@ -2434,15 +2475,28 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * Got the lock. We might not be the anticipated owner if we
                 * did a lock-steal - fix up the PI-state in that case:
                 *
-                * We can safely read pi_state->owner without holding wait_lock
-                * because we now own the rt_mutex, only the owner will attempt
-                * to change it.
+                * Speculative pi_state->owner read (we don't hold wait_lock);
+                * since we own the lock pi_state->owner == current is the
+                * stable state, anything else needs more attention.
                 */
                if (q->pi_state->owner != current)
                        ret = fixup_pi_state_owner(uaddr, q, current);
                goto out;
        }
 
+       /*
+        * If we didn't get the lock; check if anybody stole it from us. In
+        * that case, we need to fix up the uval to point to them instead of
+        * us, otherwise bad things happen. [10]
+        *
+        * Another speculative read; pi_state->owner == current is unstable
+        * but needs our attention.
+        */
+       if (q->pi_state->owner == current) {
+               ret = fixup_pi_state_owner(uaddr, q, NULL);
+               goto out;
+       }
+
        /*
         * Paranoia check. If we did not take the lock, then we should not be
         * the owner of the rt_mutex.

@@ -1290,6 +1290,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
+{
+       int ret = try_to_take_rt_mutex(lock, current, NULL);
+
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
+
+       return ret;
+}
+
 /*
  * Slow path try-lock function:
  */
@@ -1312,13 +1325,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         */
        raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-       ret = try_to_take_rt_mutex(lock, current, NULL);
-
-       /*
-        * try_to_take_rt_mutex() sets the lock waiters bit
-        * unconditionally. Clean this up.
-        */
-       fixup_rt_mutex_waiters(lock);
+       ret = __rt_mutex_slowtrylock(lock);
 
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
@@ -1505,6 +1512,11 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
        return rt_mutex_slowtrylock(lock);
 }
 
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+       return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *                     the timeout structure is provided

@@ -148,6 +148,7 @@ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
                                 struct rt_mutex_waiter *waiter);
 
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
+extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,

@@ -2046,7 +2046,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        p->state = TASK_WAKING;
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
 
@@ -2059,7 +2059,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 #else /* CONFIG_SMP */
 
        if (p->in_iowait) {
-               delayacct_blkio_end();
+               delayacct_blkio_end(p);
                atomic_dec(&task_rq(p)->nr_iowait);
        }
 
@@ -2112,7 +2112,7 @@ static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
 
        if (!task_on_rq_queued(p)) {
                if (p->in_iowait) {
-                       delayacct_blkio_end();
+                       delayacct_blkio_end(p);
                        atomic_dec(&rq->nr_iowait);
                }
                ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);

@@ -1656,7 +1656,7 @@ void run_local_timers(void)
        hrtimer_run_queues();
        /* Raise the softirq only if required. */
        if (time_before(jiffies, base->clk)) {
-               if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+               if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
                        return;
                /* CPU is awake, so check the deferrable base. */
                base++;

@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
 {
        struct trace_event_call *call, *p;
        const char *last_system = NULL;
+       bool first = false;
        int last_i;
        int i;
 
@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                /* events are usually grouped together with systems */
                if (!last_system || call->class->system != last_system) {
+                       first = true;
                        last_i = 0;
                        last_system = call->class->system;
                }
 
+               /*
+                * Since calls are grouped by systems, the likelyhood that the
+                * next call in the iteration belongs to the same system as the
+                * previous call is high. As an optimization, we skip seaching
+                * for a map[] that matches the call's system if the last call
+                * was from the same system. That's what last_i is for. If the
+                * call has the same system as the previous call, then last_i
+                * will be the index of the first map[] that has a matching
+                * system.
+                */
                for (i = last_i; i < len; i++) {
                        if (call->class->system == map[i]->system) {
                                /* Save the first system if need be */
-                               if (!last_i)
+                               if (first) {
                                        last_i = i;
+                                       first = false;
+                               }
                                update_event_printk(call, map[i]);
                        }
                }

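A self-contained C sketch of the saved-index optimization the new comment describes: remember where the previous call's system first matched in map[] and resume the next search there. The 'first' flag replaces the old '!last_i' test, which could not record a match sitting at index 0. Names and data below are illustrative, not the kernel's:

    #include <stdio.h>
    #include <string.h>

    static void update_calls(const char *systems[], int ncalls,
                             const char *map[], int len)
    {
        const char *last_system = NULL;
        int last_i = 0, first = 0;

        for (int c = 0; c < ncalls; c++) {
            if (!last_system || strcmp(systems[c], last_system) != 0) {
                first = 1;
                last_i = 0;
                last_system = systems[c];
            }

            for (int i = last_i; i < len; i++) {
                if (strcmp(map[i], systems[c]) == 0) {
                    if (first) {       /* save the first hit exactly once */
                        last_i = i;
                        first = 0;
                    }
                    printf("call %d updated from map[%d]\n", c, i);
                }
            }
        }
    }

    int main(void)
    {
        const char *systems[] = { "sched", "sched", "irq" };
        const char *map[] = { "sched", "irq", "sched" };

        update_calls(systems, 3, map, 3);
        return 0;
    }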
@ -48,6 +48,7 @@
|
||||||
#include <linux/nodemask.h>
|
#include <linux/nodemask.h>
|
||||||
#include <linux/moduleparam.h>
|
#include <linux/moduleparam.h>
|
||||||
#include <linux/uaccess.h>
|
#include <linux/uaccess.h>
|
||||||
|
#include <linux/nmi.h>
|
||||||
|
|
||||||
#include "workqueue_internal.h"
|
#include "workqueue_internal.h"
|
||||||
|
|
||||||
|
@@ -4479,6 +4480,12 @@ void show_workqueue_state(void)
 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
 				show_pwq(pwq);
 			spin_unlock_irqrestore(&pwq->pool->lock, flags);
+			/*
+			 * We could be printing a lot from atomic context, e.g.
+			 * sysrq-t -> show_workqueue_state(). Avoid triggering
+			 * hard lockup.
+			 */
+			touch_nmi_watchdog();
 		}
 	}
@@ -4506,6 +4513,12 @@ void show_workqueue_state(void)
 		pr_cont("\n");
 	next_pool:
 		spin_unlock_irqrestore(&pool->lock, flags);
+		/*
+		 * We could be printing a lot from atomic context, e.g.
+		 * sysrq-t -> show_workqueue_state(). Avoid triggering
+		 * hard lockup.
+		 */
+		touch_nmi_watchdog();
 	}
 
 	rcu_read_unlock_sched();
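Both workqueue hunks pet the NMI watchdog after each unlock, since dumping every pool and pwq from atomic context (e.g. via sysrq-t) can run long enough to be mistaken for a hard lockup. A userspace analogy of the pattern, with an invented heartbeat counter and monitor thread standing in for the kernel's watchdog:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_long heartbeat;

/* Monitor: flags the worker as stuck if the heartbeat stops advancing. */
static void *monitor(void *arg)
{
        long last = atomic_load(&heartbeat);

        (void)arg;
        for (int i = 0; i < 3; i++) {
                sleep(1);
                long now = atomic_load(&heartbeat);
                if (now == last)
                        fprintf(stderr, "watchdog: worker appears stuck\n");
                last = now;
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, monitor, NULL);
        for (long item = 0; item < 3000000; item++) {
                /* ... expensive per-item dump work would go here ... */
                if (item % 1000 == 0)
                        atomic_fetch_add(&heartbeat, 1); /* touch the watchdog */
        }
        pthread_join(t, NULL);
        return 0;
}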
@@ -30,10 +30,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	return true;
 }
 
+/**
+ * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
+ *
+ * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * mapped. check_pte() has to validate this.
+ *
+ * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
+ * page.
+ *
+ * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
+ * entry that points to @pvmw->page or any subpage in case of THP.
+ *
+ * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
+ * @pvmw->page or any subpage in case of THP.
+ *
+ * Otherwise, return false.
+ *
+ */
 static bool check_pte(struct page_vma_mapped_walk *pvmw)
 {
+	unsigned long pfn;
+
 	if (pvmw->flags & PVMW_MIGRATION) {
-#ifdef CONFIG_MIGRATION
 		swp_entry_t entry;
 		if (!is_swap_pte(*pvmw->pte))
 			return false;
@@ -41,37 +60,31 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
 
 		if (!is_migration_entry(entry))
 			return false;
-		if (migration_entry_to_page(entry) - pvmw->page >=
-				hpage_nr_pages(pvmw->page)) {
-			return false;
-		}
-		if (migration_entry_to_page(entry) < pvmw->page)
-			return false;
-#else
-		WARN_ON_ONCE(1);
-#endif
-	} else {
-		if (is_swap_pte(*pvmw->pte)) {
-			swp_entry_t entry;
-
-			entry = pte_to_swp_entry(*pvmw->pte);
-			if (is_device_private_entry(entry) &&
-			    device_private_entry_to_page(entry) == pvmw->page)
-				return true;
-		}
-
+
+		pfn = migration_entry_to_pfn(entry);
+	} else if (is_swap_pte(*pvmw->pte)) {
+		swp_entry_t entry;
+
+		/* Handle un-addressable ZONE_DEVICE memory */
+		entry = pte_to_swp_entry(*pvmw->pte);
+		if (!is_device_private_entry(entry))
+			return false;
+
+		pfn = device_private_entry_to_pfn(entry);
+	} else {
 		if (!pte_present(*pvmw->pte))
 			return false;
 
-		/* THP can be referenced by any subpage */
-		if (pte_page(*pvmw->pte) - pvmw->page >=
-				hpage_nr_pages(pvmw->page)) {
-			return false;
-		}
-		if (pte_page(*pvmw->pte) < pvmw->page)
-			return false;
+		pfn = pte_pfn(*pvmw->pte);
 	}
 
+	if (pfn < page_to_pfn(pvmw->page))
+		return false;
+
+	/* THP can be referenced by any subpage */
+	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
+		return false;
+
 	return true;
 }
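The rewrite funnels all three cases into one pair of pfn checks instead of subtracting `struct page` pointers, arithmetic that is only meaningful while both page structs live in the same contiguous mem_map chunk. A small sketch of the pfn-based membership test the new code performs, with simplified types and invented values; a "huge page" here is just a base pfn plus a page count:

#include <stdbool.h>
#include <stdio.h>

struct hpage {
        unsigned long base_pfn;
        unsigned long nr_pages;
};

static bool pfn_in_hpage(const struct hpage *hp, unsigned long pfn)
{
        /* Plain integer pfns: unlike subtracting two struct page
         * pointers, this stays valid even when the backing page
         * structs are not contiguous in memory. */
        if (pfn < hp->base_pfn)
                return false;
        return pfn - hp->base_pfn < hp->nr_pages;
}

int main(void)
{
        struct hpage hp = { .base_pfn = 0x1000, .nr_pages = 512 };

        printf("%d %d %d\n",
               pfn_in_hpage(&hp, 0x1000),  /* head page: 1 */
               pfn_in_hpage(&hp, 0x11ff),  /* last subpage: 1 */
               pfn_in_hpage(&hp, 0x1200)); /* past the end: 0 */
        return 0;
}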
@@ -721,20 +721,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-		      skb->len != CAN_MTU ||
-		      cfd->len > CAN_MAX_DLEN,
-		      "PF_CAN: dropped non conform CAN skbuf: "
-		      "dev type %d, len %d, datalen %d\n",
-		      dev->type, skb->len, cfd->len))
-		goto drop;
+	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
+		     cfd->len > CAN_MAX_DLEN)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
+			     dev->type, skb->len, cfd->len);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
 
 	can_receive(skb, dev);
 	return NET_RX_SUCCESS;
-
-drop:
-	kfree_skb(skb);
-	return NET_RX_DROP;
 }
 
 static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -742,20 +738,16 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
 
-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-		      skb->len != CANFD_MTU ||
-		      cfd->len > CANFD_MAX_DLEN,
-		      "PF_CAN: dropped non conform CAN FD skbuf: "
-		      "dev type %d, len %d, datalen %d\n",
-		      dev->type, skb->len, cfd->len))
-		goto drop;
+	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
+		     cfd->len > CANFD_MAX_DLEN)) {
+		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
+			     dev->type, skb->len, cfd->len);
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
 
 	can_receive(skb, dev);
 	return NET_RX_SUCCESS;
-
-drop:
-	kfree_skb(skb);
-	return NET_RX_DROP;
 }
 
 /*
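Both receive paths now drop malformed frames with a once-only log line instead of WARN_ONCE(). The switch reads as a deliberate downgrade: WARN_ONCE() prints a full backtrace and is treated as a kernel bug (fatal with panic_on_warn), yet the condition here is reachable with merely non-conformant input. A userspace sketch of the warn-once pattern; the constants are inlined for the demo and the function is an invented stand-in:

#include <stdio.h>

/* Emit the message on the first offending input only, without the
 * backtrace machinery a WARN_ONCE-style assert would bring in. */
#define warn_once(fmt, ...)                                     \
        do {                                                    \
                static int warned;                              \
                if (!warned) {                                  \
                        warned = 1;                             \
                        fprintf(stderr, fmt, __VA_ARGS__);      \
                }                                               \
        } while (0)

static int rcv(int dev_type, int len)
{
        if (dev_type != 280 /* ARPHRD_CAN */ || len != 16 /* CAN_MTU */) {
                warn_once("dropped non conform frame: dev type %d, len %d\n",
                          dev_type, len);
                return -1; /* drop */
        }
        return 0;
}

int main(void)
{
        rcv(1, 5);           /* warns */
        rcv(1, 5);           /* silent */
        return rcv(280, 16); /* conformant: 0 */
}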
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
 #endif
 	int len;
 
+	if (sp->sadb_address_len <
+	    DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
+			 sizeof(uint64_t)))
+		return -EINVAL;
+
 	switch (addr->sa_family) {
 	case AF_INET:
 		len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
 		uint16_t ext_type;
 		int ext_len;
 
+		if (len < sizeof(*ehdr))
+			return -EINVAL;
+
 		ext_len = ehdr->sadb_ext_len;
 		ext_len *= sizeof(uint64_t);
 		ext_type = ehdr->sadb_ext_type;
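Both af_key hunks validate a caller-supplied length before it is trusted: the first requires the claimed address extension (counted in 8-byte units) to at least cover the header up to sa_family before switching on it, and the second refuses to read an extension header when fewer than sizeof(*ehdr) bytes remain. A self-contained sketch of that style of check on a toy 8-byte-unit TLV; the struct layout and values are invented for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct ext_hdr {
        uint16_t len;   /* total length in 8-byte units, incl. header */
        uint16_t type;
        uint32_t pad;
};

static int parse(const uint8_t *buf, size_t remaining)
{
        while (remaining) {
                struct ext_hdr eh;

                /* Bail out before reading a header that isn't fully there. */
                if (remaining < sizeof(eh))
                        return -1;
                memcpy(&eh, buf, sizeof(eh));

                size_t bytes = (size_t)eh.len * 8;
                /* The claimed length must cover at least the header itself
                 * and must not run past the buffer. */
                if (eh.len < DIV_ROUND_UP(sizeof(eh), 8) || bytes > remaining)
                        return -1;

                printf("ext type %u, %zu bytes\n", (unsigned)eh.type, bytes);
                buf += bytes;
                remaining -= bytes;
        }
        return 0;
}

int main(void)
{
        uint8_t msg[16] = {0};
        struct ext_hdr eh = { .len = 2, .type = 7 };

        memcpy(msg, &eh, sizeof(eh));
        return parse(msg, sizeof(msg));
}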
@@ -270,12 +270,18 @@ else
 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 
+ifdef CONFIG_MODVERSIONS
+objtool_o = $(@D)/.tmp_$(@F)
+else
+objtool_o = $(@)
+endif
+
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file
 cmd_objtool = $(if $(patsubst y%,, \
 	$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
-	$(__objtool_obj) $(objtool_args) "$(@)";)
+	$(__objtool_obj) $(objtool_args) "$(objtool_o)";)
 objtool_obj = $(if $(patsubst y%,, \
 	$(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \
 	$(__objtool_obj))
@@ -291,15 +297,15 @@ objtool_dep = $(objtool_obj) \
 define rule_cc_o_c
 	$(call echo-cmd,checksrc) $(cmd_checksrc) \
 	$(call cmd_and_fixdep,cc_o_c) \
-	$(cmd_modversions_c) \
 	$(call echo-cmd,objtool) $(cmd_objtool) \
+	$(cmd_modversions_c) \
 	$(call echo-cmd,record_mcount) $(cmd_record_mcount)
 endef
 
 define rule_as_o_S
 	$(call cmd_and_fixdep,as_o_S) \
-	$(cmd_modversions_S) \
-	$(call echo-cmd,objtool) $(cmd_objtool)
+	$(call echo-cmd,objtool) $(cmd_objtool) \
+	$(cmd_modversions_S)
 endef
 
 # List module undefined symbols (or empty line if not enabled)
@@ -96,6 +96,8 @@ def get_thread_info(task):
         thread_info_addr = task.address + ia64_task_size
         thread_info = thread_info_addr.cast(thread_info_ptr_type)
     else:
+        if task.type.fields()[0].type == thread_info_type.get_type():
+            return task['thread_info']
         thread_info = task['stack'].cast(thread_info_ptr_type)
     return thread_info.dereference()
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 {
 	u_int64_t n = (u_int64_t) a * b;
 	if (c == 0) {
-		snd_BUG_ON(!n);
 		*r = 0;
 		return UINT_MAX;
 	}
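The dropped snd_BUG_ON(!n) fired whenever a * b was legitimately zero while c == 0, so it asserted something that is not actually an invariant of the helper. A userspace rendition of muldiv32() as it now behaves; the name mirrors the kernel helper, the rounding-free division-with-remainder matches the visible semantics, and the demo values are arbitrary:

#include <limits.h>
#include <stdio.h>

/* a * b / c with a 64-bit intermediate, remainder returned through *r,
 * saturating to UINT_MAX on division by zero or overflow. Note that
 * a * b == 0 with c == 0 is now handled quietly; that was the case the
 * removed assertion tripped over. */
static unsigned int muldiv32(unsigned int a, unsigned int b,
                             unsigned int c, unsigned int *r)
{
        unsigned long long n = (unsigned long long)a * b;

        if (c == 0) {
                *r = 0;
                return UINT_MAX;
        }
        *r = n % c;
        n /= c;
        if (n >= UINT_MAX) {
                *r = 0;
                return UINT_MAX;
        }
        return (unsigned int)n;
}

int main(void)
{
        unsigned int r;
        unsigned int q = muldiv32(48000, 1000000, 44100, &r);

        printf("q=%u r=%u\n", q, r);                 /* scale a sample rate */
        printf("zero: %u\n", muldiv32(0, 5, 0, &r)); /* UINT_MAX, no splat */
        return 0;
}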
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
 	rwlock_init(&client->ports_lock);
 	mutex_init(&client->ports_mutex);
 	INIT_LIST_HEAD(&client->ports_list_head);
+	mutex_init(&client->ioctl_mutex);
 
 	/* find free slot in the client table */
 	spin_lock_irqsave(&clients_lock, flags);
@@ -2126,7 +2127,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
 			return -EFAULT;
 	}
 
+	mutex_lock(&client->ioctl_mutex);
 	err = handler->func(client, &buf);
+	mutex_unlock(&client->ioctl_mutex);
 	if (err >= 0) {
 		/* Some commands include a bug in the 'dir' field. */
 		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
@@ -61,6 +61,7 @@ struct snd_seq_client {
 	struct list_head ports_list_head;
 	rwlock_t ports_lock;
 	struct mutex ports_mutex;
+	struct mutex ioctl_mutex;
 	int convert32;		/* convert 32->64bit */
 
 	/* output pool */
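Taken together, the three sequencer hunks add a per-client mutex and wrap the ioctl dispatch in it, so two ioctls racing on the same client can no longer see each other's half-updated state; this appears to be the widely reported ALSA sequencer ioctl race fix (tracked as CVE-2018-1000004). A minimal sketch of the per-object locking pattern, with an invented struct and handler standing in for the sequencer client:

#include <pthread.h>
#include <stdio.h>

struct client {
        pthread_mutex_t ioctl_mutex;
        int pool_size;  /* state a racing resize could corrupt */
};

/* Every entry point that mutates a client takes that client's own
 * mutex, serializing callers on the same object while leaving other
 * clients free to proceed in parallel. */
static int resize_pool(struct client *c, int new_size)
{
        pthread_mutex_lock(&c->ioctl_mutex);
        /* ... free the old pool and allocate the new one ... */
        c->pool_size = new_size;
        pthread_mutex_unlock(&c->ioctl_mutex);
        return 0;
}

int main(void)
{
        struct client c = { PTHREAD_MUTEX_INITIALIZER, 64 };

        resize_pool(&c, 128);
        printf("pool_size=%d\n", c.pool_size);
        return 0;
}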
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
 	/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
 
 	/* codec SSID */
+	SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
@@ -6173,6 +6173,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
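Both entries extend SND_PCI_QUIRK tables, which the HDA driver scans for the first entry whose PCI subsystem vendor/device pair matches the codec's, applying that entry's fixup. A toy model of that lookup; the struct, function, and table contents are illustrative only:

#include <stdio.h>

struct quirk {
        unsigned short subvendor, subdevice;
        const char *name;
        int fixup;
};

static const struct quirk tbl[] = {
        { 0x106b, 0x0600, "iMac 14,1",      1 /* IMAC27_122 */ },
        { 0x106b, 0x1c00, "MacBookPro 8,1", 2 /* MBP81 */ },
        { 0 }   /* terminator */
};

/* Return the first table entry matching the subsystem IDs, or NULL. */
static const struct quirk *lookup(unsigned short sv, unsigned short sd)
{
        for (const struct quirk *q = tbl; q->subvendor; q++)
                if (q->subvendor == sv && q->subdevice == sd)
                        return q;
        return NULL;
}

int main(void)
{
        const struct quirk *q = lookup(0x106b, 0x0600);

        if (q)
                printf("applying fixup %d for \"%s\"\n", q->fixup, q->name);
        return 0;
}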
@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE
 	@$(MAKE) $(build)=objtool
 
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@./sync-check.sh
+	@$(CONFIG_SHELL) ./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
Some files were not shown because too many files have changed in this diff.