updating to mainline 4.14.10

parent 97cd4e0224
commit c0e26a3220

config

@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 4.14.9-jakeday Kernel Configuration
+# Linux/x86_64 4.14.10-jakeday Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y

kernel/Documentation/x86/x86_64/mm.txt

@@ -1,6 +1,4 @@
-<previous description obsolete, deleted>
-
 Virtual memory map with 4 level page tables:
 
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
@@ -14,13 +12,15 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable)
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space (variable)
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Virtual memory map with 5 level page tables:
@@ -36,19 +36,22 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Architecture defines a 64-bit virtual address. Implementations can support
 less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are set to either all ones
-or all zero. This causes hole between user space and kernel addresses.
+through to the most-significant implemented bit are sign extended.
+This causes hole between user space and kernel addresses if you interpret them
+as unsigned.
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
@@ -58,9 +61,6 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
 the processes using the page fault handler, with init_top_pgt as
 reference.
 
-Current X86-64 implementations support up to 46 bits of address space (64 TB),
-which is our current limit. This expands into MBZ space in the page tables.
-
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
@@ -72,5 +72,3 @@ following fixmap section.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
-
--Andi Kleen, Jul 2004

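For reference, a minimal user-space sketch (not part of the commit) of the sign-extension rule the updated text describes; the helper name is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend a 48-bit virtual address into a canonical 64-bit one:
     * bits 63..47 become copies of bit 47, which is what creates the
     * "hole" between user-space and kernel addresses when the values
     * are read as unsigned. */
    static uint64_t canonical48(uint64_t va)
    {
        return (uint64_t)(((int64_t)va << 16) >> 16);
    }

    int main(void)
    {
        /* Unchanged: already canonical top of user space. */
        printf("%#llx\n", (unsigned long long)canonical48(0x00007fffffffffffULL));
        /* Bit 47 set: prints 0xffff800000000000, bottom of the kernel range. */
        printf("%#llx\n", (unsigned long long)canonical48(0x0000800000000000ULL));
        return 0;
    }
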
kernel/Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Petit Gorille

kernel/arch/arm64/kvm/hyp/debug-sr.c

@@ -84,6 +84,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
     u64 reg;
 
+    /* Clear pmscr in case of early return */
+    *pmscr_el1 = 0;
+
     /* SPE present on this CPU? */
     if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                               ID_AA64DFR0_PMSVER_SHIFT))

kernel/arch/parisc/kernel/entry.S

@@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi)
     STREG %r19,PT_SR7(%r16)
 
 intr_return:
-    /* NOTE: Need to enable interrupts incase we schedule. */
-    ssm PSW_SM_I, %r0
-
     /* check for reschedule */
     mfctl %cr30,%r1
     LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +904,11 @@ intr_check_sig:
     LDREG PT_IASQ1(%r16), %r20
     cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+    /* NOTE: We need to enable interrupts if we have to deliver
+     * signals. We used to do this earlier but it caused kernel
+     * stack overflows. */
+    ssm PSW_SM_I, %r0
+
     copy %r0, %r25 /* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
     ldo -16(%r30),%r29 /* Reference param save area */
@@ -958,6 +960,10 @@ intr_do_resched:
     cmpib,COND(=) 0, %r20, intr_do_preempt
     nop
 
+    /* NOTE: We need to enable interrupts if we schedule. We used
+     * to do this earlier but it caused kernel stack overflows. */
+    ssm PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
     ldo -16(%r30),%r29 /* Reference param save area */
 #endif

kernel/arch/parisc/kernel/hpmc.S

@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
     __INITRODATA
+    .align 4
     .export os_hpmc_size
 os_hpmc_size:
     .word .os_hpmc_end-.os_hpmc

kernel/arch/powerpc/include/asm/mmu_context.h

@@ -114,9 +114,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                                struct mm_struct *mm)
 {
+    return 0;
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)

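The void-to-int change in arch_dup_mmap() (repeated below for um, unicore32 and x86) lets the hook report failure during fork instead of silently succeeding. A hedged sketch of the caller-side pattern; the surrounding names are simplified, not the kernel's exact dup_mmap():

    #include <linux/mm_types.h>

    /* Sketch: a fork-path caller can now propagate a hook failure.
     * Assumes arch_dup_mmap() returns 0 on success or a negative errno,
     * as in the new signatures in this commit. */
    static int dup_mm_sketch(struct mm_struct *oldmm, struct mm_struct *mm)
    {
        int err;

        err = arch_dup_mmap(oldmm, mm); /* e.g. fails if the LDT copy fails */
        if (err)
            return err;                 /* fork() sees the error instead of a corrupt LDT */

        return 0;
    }
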
kernel/arch/powerpc/kvm/book3s_xive.c

@@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 
     /* Return the per-cpu state for state saving/migration */
     return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
-           (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+           (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
+           (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 }
 
 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
@@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 
     /*
      * Restore P and Q. If the interrupt was pending, we
-     * force both P and Q, which will trigger a resend.
+     * force Q and !P, which will trigger a resend.
      *
      * That means that a guest that had both an interrupt
      * pending (queued) and Q set will restore with only
@@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
      * is perfectly fine as coalescing interrupts that haven't
      * been presented yet is always allowed.
      */
-    if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+    if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
         state->old_p = true;
     if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
         state->old_q = true;

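The predicate change above flips when P is forced on restore. A small sketch of the corrected rules, with flag names mirroring the diff (the helper itself is illustrative, not kernel code):

    #include <linux/types.h>

    /* Sketch of the corrected P/Q restore logic:
     * - P is set only for interrupts that were presented but are no longer
     *   pending, so a still-pending interrupt restores as Q + !P and is
     *   resent instead of being swallowed.
     * - Q is set whenever the interrupt was queued or still pending. */
    static void restore_pq_sketch(u64 val, bool *old_p, bool *old_q)
    {
        *old_p = (val & KVM_XICS_PRESENTED) && !(val & KVM_XICS_PENDING);
        *old_q = (val & KVM_XICS_QUEUED) || (val & KVM_XICS_PENDING);
    }
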
kernel/arch/powerpc/perf/core-book3s.c

@@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
     int ret;
     __u64 target;
 
-    if (is_kernel_addr(addr))
-        return branch_target((unsigned int *)addr);
+    if (is_kernel_addr(addr)) {
+        if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+            return 0;
+
+        return branch_target(&instr);
+    }
 
     /* Userspace: need copy instruction here then translate it */
     pagefault_disable();

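The fix replaces a raw dereference of a BHRB-reported address with probe_kernel_read(), which fails gracefully if the address is no longer mapped. A hedged sketch of the general pattern:

    #include <linux/uaccess.h>

    /* Sketch: safely read one instruction word from a possibly-stale
     * kernel address.  probe_kernel_read() returns -EFAULT instead of
     * oopsing when the address is not mapped. */
    static unsigned int read_insn_or_zero(unsigned long addr)
    {
        unsigned int instr;

        if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
            return 0;   /* treat unreadable addresses as "no branch target" */

        return instr;
    }
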
kernel/arch/um/include/asm/mmu_context.h

@@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm);
 /*
  * Needed since we do not use the asm-generic/mm_hooks.h:
  */
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
     uml_setup_stubs(mm);
+    return 0;
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,

kernel/arch/unicore32/include/asm/mmu_context.h

@@ -81,9 +81,10 @@ do { \
     } \
 } while (0)
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+                                struct mm_struct *mm)
 {
+    return 0;
 }
 
 static inline void arch_unmap(struct mm_struct *mm,

kernel/arch/x86/Kconfig

@@ -925,7 +925,8 @@ config MAXSMP
 config NR_CPUS
     int "Maximum number of CPUs" if SMP && !MAXSMP
     range 2 8 if SMP && X86_32 && !X86_BIGSMP
-    range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK
+    range 2 64 if SMP && X86_32 && X86_BIGSMP
+    range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
     range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
     default "1" if !SMP
     default "8192" if MAXSMP

kernel/arch/x86/entry/entry_32.S

@@ -942,9 +942,9 @@ ENTRY(debug)
 
     /* Are we currently on the SYSENTER stack? */
     movl    PER_CPU_VAR(cpu_entry_area), %ecx
-    addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
-    subl    %eax, %ecx  /* ecx = (end of SYSENTER_stack) - esp */
-    cmpl    $SIZEOF_SYSENTER_stack, %ecx
+    addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+    subl    %eax, %ecx  /* ecx = (end of entry_stack) - esp */
+    cmpl    $SIZEOF_entry_stack, %ecx
     jb      .Ldebug_from_sysenter_stack
 
     TRACE_IRQS_OFF
@@ -986,9 +986,9 @@ ENTRY(nmi)
 
     /* Are we currently on the SYSENTER stack? */
     movl    PER_CPU_VAR(cpu_entry_area), %ecx
-    addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
-    subl    %eax, %ecx  /* ecx = (end of SYSENTER_stack) - esp */
-    cmpl    $SIZEOF_SYSENTER_stack, %ecx
+    addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+    subl    %eax, %ecx  /* ecx = (end of entry_stack) - esp */
+    cmpl    $SIZEOF_entry_stack, %ecx
     jb      .Lnmi_from_sysenter_stack
 
     /* Not on SYSENTER stack. */

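The renamed assembly above performs an on-stack range check with a single unsigned comparison. In C terms (a sketch, not code from the commit):

    /* Sketch of the entry-stack check done in ENTRY(debug)/ENTRY(nmi):
     * compute (end_of_entry_stack - esp); because the subtraction is
     * unsigned, one compare covers both "esp below the stack" (large
     * wrapped difference) and "esp above the stack end". */
    static int on_entry_stack(unsigned long esp, unsigned long stack_end,
                              unsigned long stack_size)
    {
        return (stack_end - esp) < stack_size;
    }
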
kernel/arch/x86/entry/entry_64.S

@@ -154,8 +154,8 @@ END(native_usergs_sysret64)
     _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
 
 /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH    CPU_ENTRY_AREA_SYSENTER_stack + \
-                       SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
+#define RSP_SCRATCH    CPU_ENTRY_AREA_entry_stack + \
+                       SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
 
 ENTRY(entry_SYSCALL_64_trampoline)
     UNWIND_HINT_EMPTY

kernel/arch/x86/entry/vsyscall/vsyscall_64.c

@@ -37,6 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
 #include <asm/traps.h>
+#include <asm/paravirt.h>
 
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
@@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
     WARN_ON_ONCE(address != regs->ip);
 
+    /* This should be unreachable in NATIVE mode. */
+    if (WARN_ON(vsyscall_mode == NATIVE))
+        return false;
+
     if (vsyscall_mode == NONE) {
         warn_bad_vsyscall(KERN_INFO, regs,
                           "vsyscall attempted with vsyscall=none");
@@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr)
     return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
+/*
+ * The VSYSCALL page is the only user-accessible page in the kernel address
+ * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
+ * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
+ * are enabled.
+ *
+ * Some day we may create a "minimal" vsyscall mode in which we emulate
+ * vsyscalls but leave the page not present.  If so, we skip calling
+ * this.
+ */
+static void __init set_vsyscall_pgtable_user_bits(void)
+{
+    pgd_t *pgd;
+    p4d_t *p4d;
+    pud_t *pud;
+    pmd_t *pmd;
+
+    pgd = pgd_offset_k(VSYSCALL_ADDR);
+    set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+    p4d = p4d_offset(pgd, VSYSCALL_ADDR);
+#if CONFIG_PGTABLE_LEVELS >= 5
+    p4d->p4d |= _PAGE_USER;
+#endif
+    pud = pud_offset(p4d, VSYSCALL_ADDR);
+    set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
+    pmd = pmd_offset(pud, VSYSCALL_ADDR);
+    set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
+}
+
 void __init map_vsyscall(void)
 {
     extern char __vsyscall_page;
     unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
-    if (vsyscall_mode != NONE)
+    if (vsyscall_mode != NONE) {
         __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                      vsyscall_mode == NATIVE
                      ? PAGE_KERNEL_VSYSCALL
                      : PAGE_KERNEL_VVAR);
+        set_vsyscall_pgtable_user_bits();
+    }
 
     BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                  (unsigned long)VSYSCALL_ADDR);

kernel/arch/x86/include/asm/cpu_entry_area.h (new file, 68 lines)

@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _ASM_X86_CPU_ENTRY_AREA_H
+#define _ASM_X86_CPU_ENTRY_AREA_H
+
+#include <linux/percpu-defs.h>
+#include <asm/processor.h>
+
+/*
+ * cpu_entry_area is a percpu region that contains things needed by the CPU
+ * and early entry/exit code.  Real types aren't used for all fields here
+ * to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+    char gdt[PAGE_SIZE];
+
+    /*
+     * The GDT is just below entry_stack and thus serves (on x86_64) as
+     * a a read-only guard page.
+     */
+    struct entry_stack_page entry_stack_page;
+
+    /*
+     * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+     * we need task switches to work, and task switches write to the TSS.
+     */
+    struct tss_struct tss;
+
+    char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+    /*
+     * Exception stacks used for IST entries.
+     *
+     * In the future, this should have a separate slot for each stack
+     * with guard pages between them.
+     */
+    char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+};
+
+#define CPU_ENTRY_AREA_SIZE     (sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+
+extern void setup_cpu_entry_areas(void);
+extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
+#define CPU_ENTRY_AREA_RO_IDT       CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU      (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE \
+    (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+
+extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+static inline struct entry_stack *cpu_entry_stack(int cpu)
+{
+    return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
+}
+
+#endif

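A short usage sketch for the new header; it only exercises the accessors declared above, and the printk is illustrative:

    #include <linux/printk.h>
    #include <asm/cpu_entry_area.h>

    /* Sketch: every CPU gets an aliased entry area; the entry stack for
     * a given CPU is reached through the accessors declared above. */
    static void show_entry_stack(int cpu)
    {
        struct entry_stack *stack = cpu_entry_stack(cpu);

        /* The stack is a fixed 64-word region inside entry_stack_page. */
        pr_info("cpu%d entry stack at %p, %zu bytes\n",
                cpu, stack, sizeof(stack->words));
    }
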
kernel/arch/x86/include/asm/desc.h

@@ -7,6 +7,7 @@
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpu_entry_area.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>

kernel/arch/x86/include/asm/espfix.h

@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_ESPFIX_H
 #define _ASM_X86_ESPFIX_H
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_ESPFIX64
 
 #include <asm/percpu.h>
 
@@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
 
 extern void init_espfix_bsp(void);
 extern void init_espfix_ap(int cpu);
-
-#endif /* CONFIG_X86_64 */
+#else
+static inline void init_espfix_ap(int cpu) { }
+#endif
 
 #endif /* _ASM_X86_ESPFIX_H */

kernel/arch/x86/include/asm/fixmap.h

@@ -44,46 +44,6 @@ extern unsigned long __FIXADDR_TOP;
              PAGE_SIZE)
 #endif
 
-/*
- * cpu_entry_area is a percpu region in the fixmap that contains things
- * needed by the CPU and early entry/exit code.  Real types aren't used
- * for all fields here to avoid circular header dependencies.
- *
- * Every field is a virtual alias of some other allocated backing store.
- * There is no direct allocation of a struct cpu_entry_area.
- */
-struct cpu_entry_area {
-    char gdt[PAGE_SIZE];
-
-    /*
-     * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
-     * a a read-only guard page.
-     */
-    struct SYSENTER_stack_page SYSENTER_stack_page;
-
-    /*
-     * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
-     * we need task switches to work, and task switches write to the TSS.
-     */
-    struct tss_struct tss;
-
-    char entry_trampoline[PAGE_SIZE];
-
-#ifdef CONFIG_X86_64
-    /*
-     * Exception stacks used for IST entries.
-     *
-     * In the future, this should have a separate slot for each stack
-     * with guard pages between them.
-     */
-    char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
-#endif
-};
-
-#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
-
-extern void setup_cpu_entry_areas(void);
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -123,7 +83,6 @@ enum fixed_addresses {
     FIX_IO_APIC_BASE_0,
     FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-    FIX_RO_IDT, /* Virtual mapping for read-only IDT */
 #ifdef CONFIG_X86_32
     FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
     FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -139,9 +98,6 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_INTEL_MID
     FIX_LNW_VRTC,
 #endif
-    /* Fixmap entries to remap the GDTs, one per processor. */
-    FIX_CPU_ENTRY_AREA_TOP,
-    FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
     /* Used for GHES mapping from assorted contexts */
@@ -230,30 +186,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
 void __early_set_fixmap(enum fixed_addresses idx,
                         phys_addr_t phys, pgprot_t flags);
 
-static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
-{
-    BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
-
-    return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
-}
-
-#define __get_cpu_entry_area_offset_index(cpu, offset) ({        \
-    BUILD_BUG_ON(offset % PAGE_SIZE != 0);                       \
-    __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);    \
-})
-
-#define get_cpu_entry_area_index(cpu, field)                     \
-    __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
-
-static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
-{
-    return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
-}
-
-static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
-{
-    return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
-}
-
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */

kernel/arch/x86/include/asm/inat.h

@@ -97,6 +97,16 @@
 #define INAT_MAKE_GROUP(grp)    ((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)      (imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE     0
+#define INAT_SEG_REG_DEFAULT    1
+#define INAT_SEG_REG_CS         2
+#define INAT_SEG_REG_SS         3
+#define INAT_SEG_REG_DS         4
+#define INAT_SEG_REG_ES         5
+#define INAT_SEG_REG_FS         6
+#define INAT_SEG_REG_GS         7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);

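A sketch of how the new identifiers might be consumed when decoding a segment-override prefix; the helper is hypothetical and not part of this diff (the prefix byte values are the architectural ones):

    #include <asm/inat.h>

    /* Hypothetical helper: map an x86 segment-override prefix byte to
     * one of the INAT_SEG_REG_* identifiers defined above. */
    static int seg_override_to_reg(unsigned char prefix)
    {
        switch (prefix) {
        case 0x2e: return INAT_SEG_REG_CS;
        case 0x36: return INAT_SEG_REG_SS;
        case 0x3e: return INAT_SEG_REG_DS;
        case 0x26: return INAT_SEG_REG_ES;
        case 0x64: return INAT_SEG_REG_FS;
        case 0x65: return INAT_SEG_REG_GS;
        default:   return INAT_SEG_REG_DEFAULT;
        }
    }
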
kernel/arch/x86/include/asm/invpcid.h (new file, 53 lines)

@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_INVPCID
+#define _ASM_X86_INVPCID
+
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+                             unsigned long type)
+{
+    struct { u64 d[2]; } desc = { { pcid, addr } };
+
+    /*
+     * The memory clobber is because the whole point is to invalidate
+     * stale TLB entries and, especially if we're flushing global
+     * mappings, we don't want the compiler to reorder any subsequent
+     * memory accesses before the TLB flush.
+     *
+     * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+     * invpcid (%rcx), %rax in long mode.
+     */
+    asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+                  : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR      0
+#define INVPCID_TYPE_SINGLE_CTXT     1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
+#define INVPCID_TYPE_ALL_NON_GLOBAL  3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+                                     unsigned long addr)
+{
+    __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+    __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+    __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+    __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
+#endif /* _ASM_X86_INVPCID */

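Callers pick one of the four wrappers by scope. A hedged sketch of a caller, with a feature check since INVPCID is optional (the fallback semantics here are simplified):

    #include <asm/cpufeature.h>
    #include <asm/invpcid.h>
    #include <asm/tlbflush.h>

    /* Sketch: invalidate one (PCID, VA) translation when the CPU has
     * INVPCID; otherwise fall back to flushing the current context.
     * The PCID value is illustrative. */
    static void flush_addr_for_pcid(unsigned long pcid, unsigned long addr)
    {
        if (static_cpu_has(X86_FEATURE_INVPCID))
            invpcid_flush_one(pcid, addr);  /* would #UD without INVPCID */
        else
            __flush_tlb();                  /* coarser: current PCID only */
    }
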
kernel/arch/x86/include/asm/mmu.h

@@ -3,6 +3,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -27,6 +28,7 @@ typedef struct {
     atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
+    struct rw_semaphore ldt_usr_sem;
     struct ldt_struct *ldt;
 #endif
 

kernel/arch/x86/include/asm/mmu_context.h

@@ -57,10 +57,16 @@ struct ldt_struct {
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+static inline void init_new_context_ldt(struct mm_struct *mm)
+{
+    mm->context.ldt = NULL;
+    init_rwsem(&mm->context.ldt_usr_sem);
+}
+int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
 #else  /* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context_ldt(struct task_struct *tsk,
+static inline void init_new_context_ldt(struct mm_struct *mm) { }
+static inline int ldt_dup_context(struct mm_struct *oldmm,
                                   struct mm_struct *mm)
 {
     return 0;
@@ -132,6 +138,8 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
 {
+    mutex_init(&mm->context.lock);
+
     mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
     atomic64_set(&mm->context.tlb_gen, 0);
 
@@ -143,7 +151,8 @@ static inline int init_new_context(struct task_struct *tsk,
         mm->context.execute_only_pkey = -1;
     }
 #endif
-    return init_new_context_ldt(tsk, mm);
+    init_new_context_ldt(mm);
+    return 0;
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
@@ -176,10 +185,10 @@ do { \
 } while (0)
 #endif
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-                                 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
     paravirt_arch_dup_mmap(oldmm, mm);
+    return ldt_dup_context(oldmm, mm);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
@@ -281,33 +290,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
     return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-/*
- * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
- * bits.  This serves two purposes.  It prevents a nasty situation in
- * which PCID-unaware code saves CR3, loads some other value (with PCID
- * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
- * the saved ASID was nonzero.  It also means that any bugs involving
- * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
- * deterministically.
- */
-static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
-{
-    if (static_cpu_has(X86_FEATURE_PCID)) {
-        VM_WARN_ON_ONCE(asid > 4094);
-        return __sme_pa(mm->pgd) | (asid + 1);
-    } else {
-        VM_WARN_ON_ONCE(asid != 0);
-        return __sme_pa(mm->pgd);
-    }
-}
-
-static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
-{
-    VM_WARN_ON_ONCE(asid > 4094);
-    return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
-}
-
 /*
  * This can be used from process context to figure out what the value of
  * CR3 is without needing to do a (slow) __read_cr3().
@@ -317,7 +299,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-    unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+    unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                                   this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
     /* For now, be very restrictive about when this can be called. */

kernel/arch/x86/include/asm/pgtable_32_types.h

@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
-                    & PMD_MASK)
+/*
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+#define CPU_ENTRY_AREA_PAGES    (NR_CPUS * 40)
+
+#define CPU_ENTRY_AREA_BASE \
+    ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
+
+#define PKMAP_BASE \
+    ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END    (PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END    (FIXADDR_START - 2 * PAGE_SIZE)
+# define VMALLOC_END    (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR   VMALLOC_START

kernel/arch/x86/include/asm/pgtable_64_types.h

@@ -77,6 +77,7 @@ typedef struct { pteval_t pte; } pte_t;
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
 #define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+
 #ifdef CONFIG_X86_5LEVEL
 # define VMALLOC_SIZE_TB _AC(16384, UL)
 # define __VMALLOC_BASE _AC(0xff92000000000000, UL)
@@ -86,6 +87,7 @@ typedef struct { pteval_t pte; } pte_t;
 # define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
 # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
 #endif
+
 #ifdef CONFIG_RANDOMIZE_MEMORY
 # define VMALLOC_START  vmalloc_base
 # define VMEMMAP_START  vmemmap_base
@@ -93,13 +95,20 @@ typedef struct { pteval_t pte; } pte_t;
 # define VMALLOC_START  __VMALLOC_BASE
 # define VMEMMAP_START  __VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
+
 #define VMALLOC_END     (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
+
 #define MODULES_VADDR   (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
 #define MODULES_END     __fix_to_virt(__end_of_fixed_addresses + 1)
 #define MODULES_LEN     (MODULES_END - MODULES_VADDR)
+
 #define ESPFIX_PGD_ENTRY    _AC(-2, UL)
 #define ESPFIX_BASE_ADDR    (ESPFIX_PGD_ENTRY << P4D_SHIFT)
+
+#define CPU_ENTRY_AREA_PGD  _AC(-3, UL)
+#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+
 #define EFI_VA_START    ( -4 * (_AC(1, UL) << 30))
 #define EFI_VA_END      (-68 * (_AC(1, UL) << 30))

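As a worked example (not in the diff), the new PGD slot -3 lands exactly on the cpu_entry_area line added to mm.txt earlier in this commit: with 4-level paging P4D_SHIFT is 39, and -3 << 39 is 0xfffffe8000000000.

    #include <stdint.h>
    #include <stdio.h>

    /* Worked example: PGD entry -3 shifted by P4D_SHIFT (39 with 4-level
     * paging) gives the base listed in Documentation/x86/x86_64/mm.txt. */
    int main(void)
    {
        uint64_t base = (uint64_t)-3 << 39; /* CPU_ENTRY_AREA_PGD << P4D_SHIFT */

        printf("%#llx\n", (unsigned long long)base); /* 0xfffffe8000000000 */
        return 0;
    }
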
kernel/arch/x86/include/asm/processor.h

@@ -336,12 +336,12 @@ struct x86_hw_tss {
 #define IO_BITMAP_OFFSET        (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET 0x8000
 
-struct SYSENTER_stack {
+struct entry_stack {
     unsigned long       words[64];
 };
 
-struct SYSENTER_stack_page {
-    struct SYSENTER_stack stack;
+struct entry_stack_page {
+    struct entry_stack stack;
 } __aligned(PAGE_SIZE);
 
 struct tss_struct {

kernel/arch/x86/include/asm/stacktrace.h

@@ -16,7 +16,7 @@ enum stack_type {
     STACK_TYPE_TASK,
     STACK_TYPE_IRQ,
     STACK_TYPE_SOFTIRQ,
-    STACK_TYPE_SYSENTER,
+    STACK_TYPE_ENTRY,
     STACK_TYPE_EXCEPTION,
     STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -29,7 +29,7 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
                    struct stack_info *info);
 
-bool in_sysenter_stack(unsigned long *stack, struct stack_info *info);
+bool in_entry_stack(unsigned long *stack, struct stack_info *info);
 
 int get_stack_info(unsigned long *stack, struct task_struct *task,
                    struct stack_info *info, unsigned long *visit_mask);

kernel/arch/x86/include/asm/tlbflush.h

@@ -9,70 +9,66 @@
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 #include <asm/smp.h>
+#include <asm/invpcid.h>
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
-                             unsigned long type)
-{
-    struct { u64 d[2]; } desc = { { pcid, addr } };
-
-    /*
-     * The memory clobber is because the whole point is to invalidate
-     * stale TLB entries and, especially if we're flushing global
-     * mappings, we don't want the compiler to reorder any subsequent
-     * memory accesses before the TLB flush.
-     *
-     * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
-     * invpcid (%rcx), %rax in long mode.
-     */
-    asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
-                  : : "m" (desc), "a" (type), "c" (&desc) : "memory");
-}
-
-#define INVPCID_TYPE_INDIV_ADDR      0
-#define INVPCID_TYPE_SINGLE_CTXT     1
-#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
-#define INVPCID_TYPE_ALL_NON_GLOBAL  3
-
-/* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-                                     unsigned long addr)
-{
-    __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
-}
-
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
-{
-    __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
-}
-
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
-{
-    __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
-}
-
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
-{
-    __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
-}
-
 static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 {
-    u64 new_tlb_gen;
-
     /*
      * Bump the generation count.  This also serves as a full barrier
      * that synchronizes with switch_mm(): callers are required to order
      * their read of mm_cpumask after their writes to the paging
      * structures.
      */
-    smp_mb__before_atomic();
-    new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-    smp_mb__after_atomic();
-
-    return new_tlb_gen;
+    return atomic64_inc_return(&mm->context.tlb_gen);
+}
+
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS        12
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#define PTI_CONSUMED_ASID_BITS  0
+
+#define CR3_AVAIL_ASID_BITS (CR3_HW_ASID_BITS - PTI_CONSUMED_ASID_BITS)
+/*
+ * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
+ * for them being zero-based.  Another -1 is because ASID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_ASID_BITS) - 2)
+
+static inline u16 kern_pcid(u16 asid)
+{
+    VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+    /*
+     * If PCID is on, ASID-aware code paths put the ASID+1 into the
+     * PCID bits.  This serves two purposes.  It prevents a nasty
+     * situation in which PCID-unaware code saves CR3, loads some other
+     * value (with PCID == 0), and then restores CR3, thus corrupting
+     * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
+     * that any bugs involving loading a PCID-enabled CR3 with
+     * CR4.PCIDE off will trigger deterministically.
+     */
+    return asid + 1;
+}
+
+struct pgd_t;
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
+{
+    if (static_cpu_has(X86_FEATURE_PCID)) {
+        return __sme_pa(pgd) | kern_pcid(asid);
+    } else {
+        VM_WARN_ON_ONCE(asid != 0);
+        return __sme_pa(pgd);
+    }
+}
+
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
+{
+    VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+    VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+    return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
 #ifdef CONFIG_PARAVIRT
@@ -234,6 +230,9 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 extern void initialize_tlbstate_and_flush(void);
 
+/*
+ * flush the entire current user mapping
+ */
 static inline void __native_flush_tlb(void)
 {
     /*
@@ -246,20 +245,12 @@ static inline void __native_flush_tlb(void)
     preempt_enable();
 }
 
-static inline void __native_flush_tlb_global_irq_disabled(void)
-{
-    unsigned long cr4;
-
-    cr4 = this_cpu_read(cpu_tlbstate.cr4);
-    /* clear PGE */
-    native_write_cr4(cr4 & ~X86_CR4_PGE);
-    /* write old PGE again and flush TLBs */
-    native_write_cr4(cr4);
-}
-
+/*
+ * flush everything
+ */
 static inline void __native_flush_tlb_global(void)
 {
-    unsigned long flags;
+    unsigned long cr4, flags;
 
     if (static_cpu_has(X86_FEATURE_INVPCID)) {
         /*
@@ -277,22 +268,36 @@ static inline void __native_flush_tlb_global(void)
      */
     raw_local_irq_save(flags);
 
-    __native_flush_tlb_global_irq_disabled();
+    cr4 = this_cpu_read(cpu_tlbstate.cr4);
+    /* toggle PGE */
+    native_write_cr4(cr4 ^ X86_CR4_PGE);
+    /* write old PGE again and flush TLBs */
+    native_write_cr4(cr4);
 
     raw_local_irq_restore(flags);
 }
 
+/*
+ * flush one page in the user mapping
+ */
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
     asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 }
 
+/*
+ * flush everything
+ */
 static inline void __flush_tlb_all(void)
 {
-    if (boot_cpu_has(X86_FEATURE_PGE))
+    if (boot_cpu_has(X86_FEATURE_PGE)) {
         __flush_tlb_global();
-    else
+    } else {
+        /*
+         * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+         */
         __flush_tlb();
+    }
 
     /*
      * Note: if we somehow had PCID but not PGE, then this wouldn't work --
@@ -303,6 +308,9 @@ static inline void __flush_tlb_all(void)
      */
 }
 
+/*
+ * flush one page in the kernel mapping
+ */
 static inline void __flush_tlb_one(unsigned long addr)
 {
     count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);

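A worked sketch (not from the commit) of what build_cr3() assembles: the page-table root's physical address in the high bits and kern_pcid(asid) in the low 12 PCID bits.

    #include <stdint.h>
    #include <stdio.h>

    #define CR3_HW_ASID_BITS 12

    /* Mirror of kern_pcid(): ASID n maps to PCID n+1 so that PCID 0
     * stays reserved for non-PCID-aware CR3 writes. */
    static uint16_t kern_pcid(uint16_t asid)
    {
        return asid + 1;
    }

    int main(void)
    {
        uint64_t pgd_pa = 0x1234000;    /* illustrative page-aligned root */
        uint16_t asid = 5;
        uint64_t cr3 = pgd_pa | kern_pcid(asid);

        /* Prints cr3 = 0x1234006 (pcid = 6). */
        printf("cr3 = %#llx (pcid = %llu)\n",
               (unsigned long long)cr3,
               (unsigned long long)(cr3 & ((1 << CR3_HW_ASID_BITS) - 1)));
        return 0;
    }
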
kernel/arch/x86/kernel/asm-offsets.c

@@ -97,6 +97,6 @@ void common(void) {
     /* Layout info for cpu_entry_area */
     OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
     OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
-    OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
-    DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
+    OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
+    DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 }

kernel/arch/x86/kernel/asm-offsets_32.c

@@ -48,7 +48,7 @@ void foo(void)
 
     /* Offset from the sysenter stack to tss.sp0 */
     DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
-           offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
+           offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
     BLANK();

@ -482,102 +482,8 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
|
||||||
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
|
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
|
||||||
[DEBUG_STACK - 1] = DEBUG_STKSZ
|
-          [DEBUG_STACK - 1]                    = DEBUG_STKSZ
 };
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-    [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 #endif
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
-                                   SYSENTER_stack_storage);
-
-static void __init
-set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
-{
-    for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
-        __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
-}
-
-/* Setup the fixmap mappings only once per-processor */
-static void __init setup_cpu_entry_area(int cpu)
-{
-#ifdef CONFIG_X86_64
-    extern char _entry_trampoline[];
-
-    /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
-    pgprot_t gdt_prot = PAGE_KERNEL_RO;
-    pgprot_t tss_prot = PAGE_KERNEL_RO;
-#else
-    /*
-     * On native 32-bit systems, the GDT cannot be read-only because
-     * our double fault handler uses a task gate, and entering through
-     * a task gate needs to change an available TSS to busy. If the
-     * GDT is read-only, that will triple fault. The TSS cannot be
-     * read-only because the CPU writes to it on task switches.
-     *
-     * On Xen PV, the GDT must be read-only because the hypervisor
-     * requires it.
-     */
-    pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
-        PAGE_KERNEL_RO : PAGE_KERNEL;
-    pgprot_t tss_prot = PAGE_KERNEL;
-#endif
-
-    __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
-    set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
-                            per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
-                            PAGE_KERNEL);
-
-    /*
-     * The Intel SDM says (Volume 3, 7.2.1):
-     *
-     *  Avoid placing a page boundary in the part of the TSS that the
-     *  processor reads during a task switch (the first 104 bytes). The
-     *  processor may not correctly perform address translations if a
-     *  boundary occurs in this area. During a task switch, the processor
-     *  reads and writes into the first 104 bytes of each TSS (using
-     *  contiguous physical addresses beginning with the physical address
-     *  of the first byte of the TSS). So, after TSS access begins, if
-     *  part of the 104 bytes is not physically contiguous, the processor
-     *  will access incorrect information without generating a page-fault
-     *  exception.
-     *
-     * There are also a lot of errata involving the TSS spanning a page
-     * boundary. Assert that we're not doing that.
-     */
-    BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
-                  offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
-    BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
-    set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-                            &per_cpu(cpu_tss_rw, cpu),
-                            sizeof(struct tss_struct) / PAGE_SIZE,
-                            tss_prot);
-
-#ifdef CONFIG_X86_32
-    per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
-#endif
-
-#ifdef CONFIG_X86_64
-    BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
-    BUILD_BUG_ON(sizeof(exception_stacks) !=
-                 sizeof(((struct cpu_entry_area *)0)->exception_stacks));
-    set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
-                            &per_cpu(exception_stacks, cpu),
-                            sizeof(exception_stacks) / PAGE_SIZE,
-                            PAGE_KERNEL);
-
-    __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
-                 __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
-#endif
-}
-
-void __init setup_cpu_entry_areas(void)
-{
-    unsigned int cpu;
-
-    for_each_possible_cpu(cpu)
-        setup_cpu_entry_area(cpu);
-}
-
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
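The pair of BUILD_BUG_ON()s above encode a compact trick: two offsets lie within the same page exactly when XOR-ing them leaves no bits set above the page-offset bits, so (start ^ end) & PAGE_MASK is non-zero only if the region crosses a page boundary. A minimal stand-alone sketch of that check; the struct, the 4000-byte pad and the mask value here are illustrative, not the kernel's real layout:

#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE_MASK (~0xfffUL)       /* assumes 4 KiB pages */

struct demo {
        char pad[4000];
        char hw_tss[104];       /* region that must not cross a page */
};

int main(void)
{
        size_t start = offsetof(struct demo, hw_tss);
        size_t end   = start + 104;     /* one past the protected region */

        /* Non-zero when start and end land in different 4 KiB pages. */
        printf("crosses page boundary: %s\n",
               ((start ^ end) & DEMO_PAGE_MASK) ? "yes" : "no");
        return 0;
}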
@@ -1323,7 +1229,7 @@ void enable_sep_cpu(void)

     tss->x86_tss.ss1 = __KERNEL_CS;
     wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-    wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0);
+    wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
     wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

     put_cpu();
@@ -1440,7 +1346,7 @@ void syscall_init(void)
      * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
      */
     wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-    wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
+    wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
     wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
     wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
@@ -1655,7 +1561,7 @@ void cpu_init(void)
      */
     set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
     load_TR_desc();
-    load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1));
+    load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

     load_mm_ldt(&init_mm);
@@ -565,15 +565,6 @@ static void print_ucode(struct ucode_cpu_info *uci)
 }
 #else
-
-/*
- * Flush global tlb. We only do this in x86_64 where paging has been enabled
- * already and PGE should be enabled as well.
- */
-static inline void flush_tlb_early(void)
-{
-    __native_flush_tlb_global_irq_disabled();
-}
-
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
     struct microcode_intel *mc;

@@ -602,10 +593,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
     if (rev != mc->hdr.rev)
         return -1;

-#ifdef CONFIG_X86_64
-    /* Flush global tlb. This is precaution. */
-    flush_tlb_early();
-#endif
     uci->cpu_sig.rev = rev;

     if (early)
@@ -18,6 +18,7 @@
 #include <linux/nmi.h>
 #include <linux/sysfs.h>

+#include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>

@@ -43,9 +44,9 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
     return true;
 }

-bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
+bool in_entry_stack(unsigned long *stack, struct stack_info *info)
 {
-    struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id());
+    struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

     void *begin = ss;
     void *end = ss + 1;

@@ -53,7 +54,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
     if ((void *)stack < begin || (void *)stack >= end)
         return false;

-    info->type = STACK_TYPE_SYSENTER;
+    info->type = STACK_TYPE_ENTRY;
     info->begin = begin;
     info->end = end;
     info->next_sp = NULL;

@@ -111,13 +112,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
      * - task stack
      * - interrupt stack
      * - HW exception stacks (double fault, nmi, debug, mce)
-     * - SYSENTER stack
+     * - entry stack
      *
      * x86-32 can have up to four stacks:
      * - task stack
      * - softirq stack
      * - hardirq stack
-     * - SYSENTER stack
+     * - entry stack
      */
     for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
         const char *stack_name;
@@ -26,8 +26,8 @@ const char *stack_type_name(enum stack_type type)
     if (type == STACK_TYPE_SOFTIRQ)
         return "SOFTIRQ";

-    if (type == STACK_TYPE_SYSENTER)
-        return "SYSENTER";
+    if (type == STACK_TYPE_ENTRY)
+        return "ENTRY_TRAMPOLINE";

     return NULL;
 }

@@ -96,7 +96,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
     if (task != current)
         goto unknown;

-    if (in_sysenter_stack(stack, info))
+    if (in_entry_stack(stack, info))
         goto recursion_check;

     if (in_hardirq_stack(stack, info))
@@ -37,8 +37,14 @@ const char *stack_type_name(enum stack_type type)
     if (type == STACK_TYPE_IRQ)
         return "IRQ";

-    if (type == STACK_TYPE_SYSENTER)
-        return "SYSENTER";
+    if (type == STACK_TYPE_ENTRY) {
+        /*
+         * On 64-bit, we have a generic entry stack that we
+         * use for all the kernel entry points, including
+         * SYSENTER.
+         */
+        return "ENTRY_TRAMPOLINE";
+    }

     if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
         return exception_stack_names[type - STACK_TYPE_EXCEPTION];

@@ -118,7 +124,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
     if (in_irq_stack(stack, info))
         goto recursion_check;

-    if (in_sysenter_stack(stack, info))
+    if (in_entry_stack(stack, info))
         goto recursion_check;

     goto unknown;
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *        contex.ldt_usr_sem
+ *          mmap_sem
+ *            context.lock
  */

 #include <linux/errno.h>

@@ -42,7 +47,7 @@ static void refresh_ldt_segments(void)
 #endif
 }

-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
     struct mm_struct *mm = __mm;

@@ -99,15 +104,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
     paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }

-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-                        struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+    mutex_lock(&mm->context.lock);
+
     /* Synchronizes with READ_ONCE in load_mm_ldt. */
-    smp_store_release(&current_mm->context.ldt, ldt);
+    smp_store_release(&mm->context.ldt, ldt);

-    /* Activate the LDT for all CPUs using current_mm. */
-    on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+    /* Activate the LDT for all CPUs using currents mm. */
+    on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+    mutex_unlock(&mm->context.lock);
 }

 static void free_ldt_struct(struct ldt_struct *ldt)

@@ -124,27 +131,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 }

 /*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
+ * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
+ * the new task is not running, so nothing can be installed.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
+int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
     struct ldt_struct *new_ldt;
-    struct mm_struct *old_mm;
     int retval = 0;

-    mutex_init(&mm->context.lock);
-    old_mm = current->mm;
-    if (!old_mm) {
-        mm->context.ldt = NULL;
+    if (!old_mm)
         return 0;
-    }

     mutex_lock(&old_mm->context.lock);
-    if (!old_mm->context.ldt) {
-        mm->context.ldt = NULL;
+    if (!old_mm->context.ldt)
         goto out_unlock;
-    }

     new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
     if (!new_ldt) {

@@ -180,7 +180,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
     unsigned long entries_size;
     int retval;

-    mutex_lock(&mm->context.lock);
+    down_read(&mm->context.ldt_usr_sem);

     if (!mm->context.ldt) {
         retval = 0;

@@ -209,7 +209,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
     retval = bytecount;

 out_unlock:
-    mutex_unlock(&mm->context.lock);
+    up_read(&mm->context.ldt_usr_sem);
     return retval;
 }

@@ -269,7 +269,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
         ldt.avl = 0;
     }

-    mutex_lock(&mm->context.lock);
+    if (down_write_killable(&mm->context.ldt_usr_sem))
+        return -EINTR;

     old_ldt = mm->context.ldt;
     old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;

@@ -291,7 +292,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
     error = 0;

 out_unlock:
-    mutex_unlock(&mm->context.lock);
+    up_write(&mm->context.ldt_usr_sem);
 out:
     return error;
 }
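The install_ldt() rewrite keeps the lock-free read side intact: the LDT is fully built first, then published with smp_store_release(), which pairs with the READ_ONCE() in load_mm_ldt() so a reader that observes the new pointer also observes initialized contents. A minimal C11 sketch of that publish pattern; the names and payload here are illustrative, not the kernel's:

#include <stdatomic.h>

struct ldt_demo { int nr_entries; };

static _Atomic(struct ldt_demo *) current_ldt;  /* stand-in for mm->context.ldt */

void publish_ldt(struct ldt_demo *new_ldt)
{
        new_ldt->nr_entries = 1;        /* initialize fully first */
        atomic_store_explicit(&current_ldt, new_ldt,
                              memory_order_release);    /* then publish */
}

struct ldt_demo *read_ldt_ptr(void)
{
        /* Acquire on the read side pairs with the release store above. */
        return atomic_load_explicit(&current_ldt, memory_order_acquire);
}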
@@ -990,12 +990,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
     initial_code = (unsigned long)start_secondary;
     initial_stack = idle->thread.sp;

-    /*
-     * Enable the espfix hack for this CPU
-     */
-#ifdef CONFIG_X86_ESPFIX64
+    /* Enable the espfix hack for this CPU */
     init_espfix_ap(cpu);
-#endif

     /* So we see what's up */
     announce_cpu(cpu, apicid);
@@ -52,6 +52,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>

@@ -950,8 +951,9 @@ void __init trap_init(void)
      * "sidt" instruction will not leak the location of the kernel, and
      * to defend the IDT against arbitrary memory write vulnerabilities.
      * It will be reloaded in cpu_init() */
-    __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
-    idt_descr.address = fix_to_virt(FIX_RO_IDT);
+    cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+                PAGE_KERNEL_RO);
+    idt_descr.address = CPU_ENTRY_AREA_RO_IDT;

     /*
      * Should be a barrier for any external CPU state:
@@ -2404,9 +2404,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }

 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-                                    u64 cr0, u64 cr4)
+                                    u64 cr0, u64 cr3, u64 cr4)
 {
     int bad;
+    u64 pcid;
+
+    /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
+    pcid = 0;
+    if (cr4 & X86_CR4_PCIDE) {
+        pcid = cr3 & 0xfff;
+        cr3 &= ~0xfff;
+    }
+
+    bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+    if (bad)
+        return X86EMUL_UNHANDLEABLE;

     /*
      * First enable PAE, long mode needs it before CR0.PG = 1 is set.

@@ -2425,6 +2437,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
         bad = ctxt->ops->set_cr(ctxt, 4, cr4);
         if (bad)
             return X86EMUL_UNHANDLEABLE;
+        if (pcid) {
+            bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+            if (bad)
+                return X86EMUL_UNHANDLEABLE;
+        }
+
     }

     return X86EMUL_CONTINUE;

@@ -2435,11 +2453,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
     struct desc_struct desc;
     struct desc_ptr dt;
     u16 selector;
-    u32 val, cr0, cr4;
+    u32 val, cr0, cr3, cr4;
     int i;

     cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
-    ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+    cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
     ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
     ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);

@@ -2481,14 +2499,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)

     ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

-    return rsm_enter_protected_mode(ctxt, cr0, cr4);
+    return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }

 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
     struct desc_struct desc;
     struct desc_ptr dt;
-    u64 val, cr0, cr4;
+    u64 val, cr0, cr3, cr4;
     u32 base3;
     u16 selector;
     int i, r;

@@ -2505,7 +2523,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
     ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

     cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
-    ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
+    cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
     cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
     ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
     val = GET_SMSTATE(u64, smbase, 0x7ed0);

@@ -2533,7 +2551,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
     dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
     ctxt->ops->set_gdt(ctxt, &dt);

-    r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+    r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
     if (r != X86EMUL_CONTINUE)
         return r;
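The PCID handling added to rsm_enter_protected_mode() follows from a hardware ordering rule: CR4.PCIDE may only be enabled while CR3[11:0] is zero, so the PCID field is stripped before the first CR3 load and written back once CR4 is in place. The same order of operations, as a stand-alone sketch with hypothetical setter callbacks:

#include <stdint.h>

#define DEMO_CR4_PCIDE (1ULL << 17)

void restore_cr3_then_cr4(uint64_t cr3, uint64_t cr4,
                          void (*set_cr3)(uint64_t),
                          void (*set_cr4)(uint64_t))
{
        uint64_t pcid = 0;

        if (cr4 & DEMO_CR4_PCIDE) {     /* CR3[11:0] must be zero first */
                pcid = cr3 & 0xfff;
                cr3 &= ~0xfffULL;
        }

        set_cr3(cr3);                   /* load CR3 without the PCID bits */
        set_cr4(cr4);                   /* CR4.PCIDE can now be set safely */

        if (pcid)
                set_cr3(cr3 | pcid);    /* finally restore the saved PCID */
}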
@@ -3382,7 +3382,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
         spin_lock(&vcpu->kvm->mmu_lock);
         if(make_mmu_pages_available(vcpu) < 0) {
             spin_unlock(&vcpu->kvm->mmu_lock);
-            return 1;
+            return -ENOSPC;
         }
         sp = kvm_mmu_get_page(vcpu, 0, 0,
                 vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);

@@ -3397,7 +3397,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
             spin_lock(&vcpu->kvm->mmu_lock);
             if (make_mmu_pages_available(vcpu) < 0) {
                 spin_unlock(&vcpu->kvm->mmu_lock);
-                return 1;
+                return -ENOSPC;
             }
             sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
                     i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);

@@ -3437,7 +3437,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
         spin_lock(&vcpu->kvm->mmu_lock);
         if (make_mmu_pages_available(vcpu) < 0) {
             spin_unlock(&vcpu->kvm->mmu_lock);
-            return 1;
+            return -ENOSPC;
         }
         sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                 vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);

@@ -3474,7 +3474,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
         spin_lock(&vcpu->kvm->mmu_lock);
         if (make_mmu_pages_available(vcpu) < 0) {
             spin_unlock(&vcpu->kvm->mmu_lock);
-            return 1;
+            return -ENOSPC;
         }
         sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
                               0, ACC_ALL);
@@ -7359,7 +7359,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif

     kvm_rip_write(vcpu, regs->rip);
-    kvm_set_rflags(vcpu, regs->rflags);
+    kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);

     vcpu->arch.exception.pending = false;
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable

 Table: 3-byte opcode 1 (0x0f 0x38)

@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)

@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable

 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable

 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg
 endif

 obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-         pat.o pgtable.o physaddr.o setup_nx.o tlb.o
+         pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o

 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)

kernel/arch/x86/mm/cpu_entry_area.c (new file, 139 lines)
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+    [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+    unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+    BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+    return (struct cpu_entry_area *) va;
+}
+EXPORT_SYMBOL(get_cpu_entry_area);
+
+void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
+{
+    unsigned long va = (unsigned long) cea_vaddr;
+
+    set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+}
+
+static void __init
+cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
+{
+    for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
+        cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
+{
+#ifdef CONFIG_X86_64
+    extern char _entry_trampoline[];
+
+    /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+    pgprot_t gdt_prot = PAGE_KERNEL_RO;
+    pgprot_t tss_prot = PAGE_KERNEL_RO;
+#else
+    /*
+     * On native 32-bit systems, the GDT cannot be read-only because
+     * our double fault handler uses a task gate, and entering through
+     * a task gate needs to change an available TSS to busy. If the
+     * GDT is read-only, that will triple fault. The TSS cannot be
+     * read-only because the CPU writes to it on task switches.
+     *
+     * On Xen PV, the GDT must be read-only because the hypervisor
+     * requires it.
+     */
+    pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+        PAGE_KERNEL_RO : PAGE_KERNEL;
+    pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+    cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
+                gdt_prot);
+
+    cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+                         per_cpu_ptr(&entry_stack_storage, cpu), 1,
+                         PAGE_KERNEL);
+
+    /*
+     * The Intel SDM says (Volume 3, 7.2.1):
+     *
+     *  Avoid placing a page boundary in the part of the TSS that the
+     *  processor reads during a task switch (the first 104 bytes). The
+     *  processor may not correctly perform address translations if a
+     *  boundary occurs in this area. During a task switch, the processor
+     *  reads and writes into the first 104 bytes of each TSS (using
+     *  contiguous physical addresses beginning with the physical address
+     *  of the first byte of the TSS). So, after TSS access begins, if
+     *  part of the 104 bytes is not physically contiguous, the processor
+     *  will access incorrect information without generating a page-fault
+     *  exception.
+     *
+     * There are also a lot of errata involving the TSS spanning a page
+     * boundary. Assert that we're not doing that.
+     */
+    BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+                  offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+    BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+    cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+                         &per_cpu(cpu_tss_rw, cpu),
+                         sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
+
+#ifdef CONFIG_X86_32
+    per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+    BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+    BUILD_BUG_ON(sizeof(exception_stacks) !=
+                 sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+    cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+                         &per_cpu(exception_stacks, cpu),
+                         sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
+
+    cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
+                __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+}
+
+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+    unsigned long start, end;
+
+    BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+    BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+    start = CPU_ENTRY_AREA_BASE;
+    end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+    /* Careful here: start + PMD_SIZE might wrap around */
+    for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
+        populate_extra_pte(start);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+    unsigned int cpu;
+
+    setup_cpu_entry_area_ptes();
+
+    for_each_possible_cpu(cpu)
+        setup_cpu_entry_area(cpu);
+}
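get_cpu_entry_area() in the new file is pure address arithmetic: every CPU's entry area sits at a fixed stride above a common base, so cpu number and virtual address convert both ways with a multiply or a divide. A small sketch of that layout math; the base and size constants here are made up, the real ones come from asm/cpu_entry_area.h:

#include <assert.h>
#include <stdint.h>

#define DEMO_AREA_BASE 0xfffffe0000001000ULL    /* illustrative only */
#define DEMO_AREA_SIZE 0x0000000000040000ULL    /* illustrative only */

static uint64_t area_va(unsigned int cpu)
{
        return DEMO_AREA_BASE + (uint64_t)cpu * DEMO_AREA_SIZE;
}

static unsigned int area_cpu(uint64_t va)
{
        return (unsigned int)((va - DEMO_AREA_BASE) / DEMO_AREA_SIZE);
}

int main(void)
{
        /* The mapping round-trips by construction. */
        assert(area_cpu(area_va(3)) == 3);
        return 0;
}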
@@ -44,10 +44,12 @@ struct addr_marker {
     unsigned long max_lines;
 };

-/* indices for address_markers; keep sync'd w/ address_markers below */
+/* Address space markers hints */
+
+#ifdef CONFIG_X86_64
+
 enum address_markers_idx {
     USER_SPACE_NR = 0,
-#ifdef CONFIG_X86_64
     KERNEL_SPACE_NR,
     LOW_KERNEL_NR,
     VMALLOC_START_NR,

@@ -56,56 +58,74 @@ enum address_markers_idx {
     KASAN_SHADOW_START_NR,
     KASAN_SHADOW_END_NR,
 #endif
+    CPU_ENTRY_AREA_NR,
 #ifdef CONFIG_X86_ESPFIX64
     ESPFIX_START_NR,
+#endif
+#ifdef CONFIG_EFI
+    EFI_END_NR,
 #endif
     HIGH_KERNEL_NR,
     MODULES_VADDR_NR,
     MODULES_END_NR,
-#else
+    FIXADDR_START_NR,
+    END_OF_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+    [USER_SPACE_NR]         = { 0,                  "User Space" },
+    [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
+    [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
+    [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+    [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
+#ifdef CONFIG_KASAN
+    [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
+    [KASAN_SHADOW_END_NR]   = { KASAN_SHADOW_END,   "KASAN shadow end" },
+#endif
+    [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
+#ifdef CONFIG_X86_ESPFIX64
+    [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
+#endif
+#ifdef CONFIG_EFI
+    [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
+#endif
+    [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
+    [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
+    [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
+    [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
+    [END_OF_SPACE_NR]       = { -1,                 NULL }
+};
+
+#else /* CONFIG_X86_64 */
+
+enum address_markers_idx {
+    USER_SPACE_NR = 0,
     KERNEL_SPACE_NR,
     VMALLOC_START_NR,
     VMALLOC_END_NR,
 #ifdef CONFIG_HIGHMEM
     PKMAP_BASE_NR,
 #endif
+    CPU_ENTRY_AREA_NR,
     FIXADDR_START_NR,
-#endif
+    END_OF_SPACE_NR,
 };

-/* Address space markers hints */
 static struct addr_marker address_markers[] = {
-    { 0, "User Space" },
-#ifdef CONFIG_X86_64
-    { 0x8000000000000000UL, "Kernel Space" },
-    { 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
-    { 0/* VMALLOC_START */, "vmalloc() Area" },
-    { 0/* VMEMMAP_START */, "Vmemmap" },
-#ifdef CONFIG_KASAN
-    { KASAN_SHADOW_START,   "KASAN shadow" },
-    { KASAN_SHADOW_END,     "KASAN shadow end" },
-#endif
-# ifdef CONFIG_X86_ESPFIX64
-    { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
-# endif
-# ifdef CONFIG_EFI
-    { EFI_VA_END,           "EFI Runtime Services" },
-# endif
-    { __START_KERNEL_map,   "High Kernel Mapping" },
-    { MODULES_VADDR,        "Modules" },
-    { MODULES_END,          "End Modules" },
-#else
-    { PAGE_OFFSET,          "Kernel Mapping" },
-    { 0/* VMALLOC_START */, "vmalloc() Area" },
-    { 0/*VMALLOC_END*/,     "vmalloc() End" },
+    [USER_SPACE_NR]         = { 0,                  "User Space" },
+    [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
+    [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
+    [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
 #ifdef CONFIG_HIGHMEM
-    { 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
+    [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
 #endif
-    { 0/*FIXADDR_START*/,   "Fixmap Area" },
-#endif
-    { -1, NULL } /* End of list */
+    [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
+    [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
+    [END_OF_SPACE_NR]       = { -1,                 NULL }
 };

+#endif /* !CONFIG_X86_64 */
+
 /* Multipliers for offsets within the PTEs */
 #define PTE_LEVEL_MULT (PAGE_SIZE)
 #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)

@@ -140,7 +160,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
     static const char * const level_name[] =
         { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

-    if (!pgprot_val(prot)) {
+    if (!(pr & _PAGE_PRESENT)) {
         /* Not present */
         pt_dump_cont_printf(m, dmsg, "                          ");
     } else {

@@ -525,8 +545,8 @@ static int __init pt_dump_init(void)
     address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
     address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
+    address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
 #endif

     return 0;
 }
 __initcall(pt_dump_init);
@@ -50,6 +50,7 @@
 #include <asm/setup.h>
 #include <asm/set_memory.h>
 #include <asm/page_types.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/init.h>

 #include "mm_internal.h"

@@ -766,6 +767,7 @@ void __init mem_init(void)
     mem_init_print_info(NULL);
     printk(KERN_INFO "virtual kernel memory layout:\n"
         "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+        "  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
         "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif

@@ -777,6 +779,10 @@ void __init mem_init(void)
         FIXADDR_START, FIXADDR_TOP,
         (FIXADDR_TOP - FIXADDR_START) >> 10,

+        CPU_ENTRY_AREA_BASE,
+        CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
+        CPU_ENTRY_AREA_MAP_SIZE >> 10,
+
 #ifdef CONFIG_HIGHMEM
         PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
         (LAST_PKMAP*PAGE_SIZE) >> 10,
@@ -15,6 +15,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/cpu_entry_area.h>

 extern struct range pfn_mapped[E820_MAX_ENTRIES];

@@ -322,31 +323,33 @@ void __init kasan_init(void)
         map_range(&pfn_mapped[i]);
     }

+    shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
+    shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+    shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+                        PAGE_SIZE);
+
+    shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
+                    CPU_ENTRY_AREA_MAP_SIZE);
+    shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+    shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+                    PAGE_SIZE);
+
     kasan_populate_zero_shadow(
         kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+        shadow_cpu_entry_begin);
+
+    kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+                          (unsigned long)shadow_cpu_entry_end, 0);
+
+    kasan_populate_zero_shadow(shadow_cpu_entry_end,
         kasan_mem_to_shadow((void *)__START_KERNEL_map));

     kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                           (unsigned long)kasan_mem_to_shadow(_end),
                           early_pfn_to_nid(__pa(_stext)));

-    shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM);
-    shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-    shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-                        PAGE_SIZE);
-
-    shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE);
-    shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-    shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-                    PAGE_SIZE);
-
     kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                shadow_cpu_entry_begin);
-
-    kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-                          (unsigned long)shadow_cpu_entry_end, 0);
-
-    kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END);
+                (void *)KASAN_SHADOW_END);

     load_cr3(init_top_pgt);
     __flush_tlb_all();
@@ -10,6 +10,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>

+#include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
@@ -128,7 +128,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
      * isn't free.
      */
 #ifdef CONFIG_DEBUG_VM
-    if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
+    if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
         /*
          * If we were to BUG here, we'd be very likely to kill
          * the system so hard that we don't see the call trace.

@@ -195,7 +195,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         if (need_flush) {
             this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
             this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-            write_cr3(build_cr3(next, new_asid));
+            write_cr3(build_cr3(next->pgd, new_asid));

             /*
              * NB: This gets called via leave_mm() in the idle path

@@ -208,7 +208,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
             trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
         } else {
             /* The new ASID is already up to date. */
-            write_cr3(build_cr3_noflush(next, new_asid));
+            write_cr3(build_cr3_noflush(next->pgd, new_asid));

             /* See above wrt _rcuidle. */
             trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);

@@ -288,7 +288,7 @@ void initialize_tlbstate_and_flush(void)
            !(cr4_read_shadow() & X86_CR4_PCIDE));

     /* Force ASID 0 and force a TLB flush. */
-    write_cr3(build_cr3(mm, 0));
+    write_cr3(build_cr3(mm->pgd, 0));

     /* Reinitialize tlbstate. */
     this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);

@@ -551,7 +551,7 @@ static void do_kernel_range_flush(void *info)

     /* flush range by one by one 'invlpg' */
     for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-        __flush_tlb_single(addr);
+        __flush_tlb_one(addr);
 }

 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
         local_flush_tlb();
         stat->d_alltlb++;
     } else {
-        __flush_tlb_one(msg->address);
+        __flush_tlb_single(msg->address);
         stat->d_onetlb++;
     }
     stat->d_requestee++;
@@ -2261,7 +2261,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)

     switch (idx) {
     case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-    case FIX_RO_IDT:
 #ifdef CONFIG_X86_32
     case FIX_WP_TEST:
 # ifdef CONFIG_HIGHMEM

@@ -2272,7 +2271,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
     case FIX_TEXT_POKE0:
     case FIX_TEXT_POKE1:
-    case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM:
         /* All local page mappings */
         pte = pfn_pte(phys, prot);
         break;
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
     bio->bi_disk = bio_src->bi_disk;
     bio->bi_partno = bio_src->bi_partno;
     bio_set_flag(bio, BIO_CLONED);
+    if (bio_flagged(bio_src, BIO_THROTTLED))
+        bio_set_flag(bio, BIO_THROTTLED);
     bio->bi_opf = bio_src->bi_opf;
     bio->bi_write_hint = bio_src->bi_write_hint;
     bio->bi_iter = bio_src->bi_iter;
@@ -2223,13 +2223,7 @@ again:
 out_unlock:
     spin_unlock_irq(q->queue_lock);
 out:
-    /*
-     * As multiple blk-throtls may stack in the same issue path, we
-     * don't want bios to leave with the flag set. Clear the flag if
-     * being issued.
-     */
-    if (!throttled)
-        bio_clear_flag(bio, BIO_THROTTLED);
+    bio_set_flag(bio, BIO_THROTTLED);

 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
     if (throttled || !td->track_bio_latency)
@@ -1165,12 +1165,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
         if (!af_alg_readable(sk))
             break;

-        if (!ctx->used) {
-            err = af_alg_wait_for_data(sk, flags);
-            if (err)
-                return err;
-        }
-
         seglen = min_t(size_t, (maxsize - len),
                        msg_data_left(msg));
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
     size_t usedpages = 0;       /* [in]  RX bufs to be used from user */
     size_t processed = 0;       /* [in]  TX bufs to be consumed */

+    if (!ctx->used) {
+        err = af_alg_wait_for_data(sk, flags);
+        if (err)
+            return err;
+    }
+
     /*
      * Data length provided by caller via sendmsg/sendpage that has not
      * yet been processed.

@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
         /* AIO operation */
         sock_hold(sk);
         areq->iocb = msg->msg_iocb;
+
+        /* Remember output size that will be generated. */
+        areq->outlen = outlen;
+
         aead_request_set_callback(&areq->cra_u.aead_req,
                                   CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   af_alg_async_cb, areq);

@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
             crypto_aead_decrypt(&areq->cra_u.aead_req);

         /* AIO operation in progress */
-        if (err == -EINPROGRESS || err == -EBUSY) {
-            /* Remember output size that will be generated. */
-            areq->outlen = outlen;
-
+        if (err == -EINPROGRESS || err == -EBUSY)
             return -EIOCBQUEUED;
-        }

         sock_put(sk);
     } else {
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
     int err = 0;
     size_t len = 0;

+    if (!ctx->used) {
+        err = af_alg_wait_for_data(sk, flags);
+        if (err)
+            return err;
+    }
+
     /* Allocate cipher request for current operation. */
     areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
                                  crypto_skcipher_reqsize(tfm));

@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
         /* AIO operation */
         sock_hold(sk);
         areq->iocb = msg->msg_iocb;
+
+        /* Remember output size that will be generated. */
+        areq->outlen = len;
+
         skcipher_request_set_callback(&areq->cra_u.skcipher_req,
                                       CRYPTO_TFM_REQ_MAY_SLEEP,
                                       af_alg_async_cb, areq);

@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
             crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

         /* AIO operation in progress */
-        if (err == -EINPROGRESS || err == -EBUSY) {
-            /* Remember output size that will be generated. */
-            areq->outlen = len;
-
+        if (err == -EINPROGRESS || err == -EBUSY)
             return -EIOCBQUEUED;
-        }

         sock_put(sk);
     } else {
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
         pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
         crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
         INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+        spin_lock_init(&cpu_queue->q_lock);
     }
     return 0;
 }

@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
     int cpu, err;
     struct mcryptd_cpu_queue *cpu_queue;

-    cpu = get_cpu();
-    cpu_queue = this_cpu_ptr(queue->cpu_queue);
-    rctx->tag.cpu = cpu;
+    cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+    spin_lock(&cpu_queue->q_lock);
+    cpu = smp_processor_id();
+    rctx->tag.cpu = smp_processor_id();

     err = crypto_enqueue_request(&cpu_queue->queue, request);
     pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
              cpu, cpu_queue, request);
+    spin_unlock(&cpu_queue->q_lock);
     queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-    put_cpu();

     return err;
 }

@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
     cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
     i = 0;
     while (i < MCRYPTD_BATCH || single_task_running()) {
-        /*
-         * preempt_disable/enable is used to prevent
-         * being preempted by mcryptd_enqueue_request()
-         */
-        local_bh_disable();
-        preempt_disable();
+        spin_lock_bh(&cpu_queue->q_lock);
         backlog = crypto_get_backlog(&cpu_queue->queue);
         req = crypto_dequeue_request(&cpu_queue->queue);
-        preempt_enable();
-        local_bh_enable();
+        spin_unlock_bh(&cpu_queue->q_lock);

         if (!req) {
             mcryptd_opportunistic_flush();

@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
         ++i;
     }
     if (cpu_queue->queue.qlen)
-        queue_work(kcrypto_wq, &cpu_queue->work);
+        queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }

 void mcryptd_flusher(struct work_struct *__work)
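The mcryptd rework replaces the preempt/bh juggling with one spinlock per queue, so the enqueue path and the worker serialize on the same lock even when they run on different CPUs. A rough userspace analogue of that producer/consumer shape, with pthread spinlocks standing in for the kernel primitives and illustrative names throughout:

#include <pthread.h>

struct demo_queue {
        pthread_spinlock_t q_lock;      /* plays the role of q_lock above */
        int qlen;
};

void demo_enqueue(struct demo_queue *q)
{
        pthread_spin_lock(&q->q_lock);  /* producer side */
        q->qlen++;
        pthread_spin_unlock(&q->q_lock);
        /* kick the worker here, outside the critical section */
}

void demo_worker(struct demo_queue *q)
{
        pthread_spin_lock(&q->q_lock);  /* consumer takes the same lock */
        if (q->qlen)
                q->qlen--;
        pthread_spin_unlock(&q->q_lock);
        /* process the dequeued item without holding the lock */
}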
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,

     walk->total = req->cryptlen;
     walk->nbytes = 0;
+    walk->iv = req->iv;
+    walk->oiv = req->iv;

     if (unlikely(!walk->total))
         return 0;

@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
     scatterwalk_start(&walk->in, req->src);
     scatterwalk_start(&walk->out, req->dst);

-    walk->iv = req->iv;
-    walk->oiv = req->iv;
-
     walk->flags &= ~SKCIPHER_WALK_SLEEP;
     walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                    SKCIPHER_WALK_SLEEP : 0;

@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
     int err;

     walk->nbytes = 0;
+    walk->iv = req->iv;
+    walk->oiv = req->iv;

     if (unlikely(!walk->total))
         return 0;

@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
     scatterwalk_done(&walk->in, 0, walk->total);
     scatterwalk_done(&walk->out, 0, walk->total);

-    walk->iv = req->iv;
-    walk->oiv = req->iv;
-
     if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
         walk->flags |= SKCIPHER_WALK_SLEEP;
     else
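Hoisting the walk->iv/walk->oiv assignments in front of the zero-length early return guarantees that a caller who later reads walk->iv unconditionally never sees stale memory, even when the request carries no payload. The hazard in miniature, with hypothetical names rather than the real crypto API:

#include <stddef.h>

struct demo_walk {
        const void *iv;
        size_t total;
};

int demo_walk_init(struct demo_walk *w, const void *iv, size_t total)
{
        w->total = total;
        w->iv = iv;             /* assigned even when total == 0 */

        if (total == 0)
                return 0;       /* the early exit is now harmless */

        /* ... set up the scatter/gather state here ... */
        return 0;
}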
@@ -1007,7 +1007,7 @@ skip:
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < sizeof(*rcd)) {
+	else if (len < 0 || len < sizeof(*rcd)) {
 		rc = -EIO;
 		goto out;
 	}

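The extra `len < 0` in the hunk above is a signed/unsigned fix: `len` is signed while `sizeof(*rcd)` is a `size_t`, so in `len < sizeof(*rcd)` a negative error value is converted to a huge unsigned number and slips past the check. A standalone demonstration with stand-in types and sizes:

#include <stdio.h>

int main(void)
{
	long len = -5;			/* stand-in for a negative error code */
	unsigned long rcd_size = 16;	/* stand-in for sizeof(*rcd) */

	/* usual arithmetic conversions turn len into a huge unsigned value */
	printf("%d\n", len < rcd_size);			/* 0: guard wrongly passes */
	printf("%d\n", len < 0 || len < rcd_size);	/* 1: error is caught */
	return 0;
}
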
@@ -1457,6 +1457,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 				dev_name(&adev_dimm->dev));
 		return -ENXIO;
 	}
+	/*
+	 * Record nfit_mem for the notification path to track back to
+	 * the nfit sysfs attributes for this dimm device object.
+	 */
+	dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
 	/*
 	 * Until standardization materializes we need to consider 4

@@ -1516,9 +1521,11 @@ static void shutdown_dimm_notify(void *data)
 			sysfs_put(nfit_mem->flags_attr);
 			nfit_mem->flags_attr = NULL;
 		}
-		if (adev_dimm)
+		if (adev_dimm) {
 			acpi_remove_notify_handler(adev_dimm->handle,
 					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+			dev_set_drvdata(&adev_dimm->dev, NULL);
+		}
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
 }

@@ -3469,7 +3469,6 @@ static int add_smi(struct smi_info *new_smi)
 			 ipmi_addr_src_to_str(new_smi->addr_source),
 			 si_to_str[new_smi->si_type]);
 		rv = -EBUSY;
-		kfree(new_smi);
 		goto out_err;
 	}
 }

@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>

@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	sun9i_mmc_reset_assert(rcdev, id);
+	udelay(10);
+	sun9i_mmc_reset_deassert(rcdev, id);
+
+	return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert		= sun9i_mmc_reset_assert,
 	.deassert	= sun9i_mmc_reset_deassert,
+	.reset		= sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)

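With a `.reset` callback wired into `sun9i_mmc_reset_ops`, consumers can pulse the reset line through the framework's one-shot API instead of open-coding assert/delay/deassert. A sketch of the consumer side, with hypothetical names:

#include <linux/err.h>
#include <linux/reset.h>

static int demo_mmc_hw_reset(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* dispatches to the provider's .reset, i.e. assert + 10us + deassert */
	return reset_control_reset(rstc);
}
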
@@ -325,17 +325,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	 * must wait for all rendering to complete to the object (as unbinding
 	 * must anyway), and retire the requests.
 	 */
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
-				   I915_WAIT_ALL,
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(to_i915(obj->base.dev));
-
 	while ((vma = list_first_entry_or_null(&obj->vma_list,
 					       struct i915_vma,
 					       obj_link))) {

@@ -567,12 +567,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 	if (IS_ERR(tcon->crtc)) {
 		dev_err(dev, "Couldn't create our CRTC\n");
 		ret = PTR_ERR(tcon->crtc);
-		goto err_free_clocks;
+		goto err_free_dotclock;
 	}
 
 	ret = sun4i_rgb_init(drm, tcon);
 	if (ret < 0)
-		goto err_free_clocks;
+		goto err_free_dotclock;
 
 	list_add_tail(&tcon->list, &drv->tcon_list);
 

@@ -667,6 +667,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
 			sizeof(struct ec_response_get_protocol_info);
 	ec_dev->dout_size = sizeof(struct ec_host_request);
 
+	ec_spi->last_transfer_ns = ktime_get_ns();
 
 	err = cros_ec_register(ec_dev);
 	if (err) {

@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->codec)
 		return true;
 
-	if (of_find_node_by_name(node, "codec"))
+	node = of_get_child_by_name(parent, "codec");
+	if (node) {
+		of_node_put(node);
 		return true;
+	}
 
 	return false;
 }

@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-	if (of_find_node_by_name(node, "vibra"))
+	struct device_node *node;
+
+	node = of_get_child_by_name(parent, "vibra");
+	if (node) {
+		of_node_put(node);
 		return true;
-#endif
+	}
 
 	return false;
 }

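Both TWL hunks fix the same API misuse: `of_find_node_by_name()` does not look up a child of the node it is given; it walks the whole flattened tree starting after that node (and drops the reference on it), so it can match a "codec" or "vibra" node belonging to an entirely different device. The correct child lookup also returns a node with an elevated refcount that must be put. The reusable shape of the fix, with hypothetical names:

#include <linux/of.h>

static bool demo_has_child(struct device_node *parent, const char *name)
{
	struct device_node *node = of_get_child_by_name(parent, name);

	if (!node)
		return false;

	of_node_put(node);	/* drop the reference taken by the lookup */
	return true;
}
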
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }
 

@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+			mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
 			dev->stats.rx_errors++;
-			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
 		}

@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
 	int queue;
 
-	for (queue = 0; queue < txq_number; queue++)
+	for (queue = 0; queue < rxq_number; queue++)
 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 

@@ -210,12 +210,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
 	return ret;
 }
 
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
-			struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+			struct log_group *log)
 {
 	return arena_read_bytes(arena,
-			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-			2 * LOG_ENT_SIZE, 0);
+			arena->logoff + (lane * LOG_GRP_SIZE), log,
+			LOG_GRP_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;

@@ -255,6 +255,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
 	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
 	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
 	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
 }
 
 static void btt_debugfs_init(struct btt *btt)

@@ -273,6 +275,11 @@ static void btt_debugfs_init(struct btt *btt)
 	}
 }
 
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+	return le32_to_cpu(log->ent[log_idx].seq);
+}
+
 /*
  * This function accepts two log entries, and uses the
  * sequence number to find the 'older' entry.

@@ -282,8 +289,10 @@ static void btt_debugfs_init(struct btt *btt)
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
 {
+	int idx0 = a->log_index[0];
+	int idx1 = a->log_index[1];
 	int old;
 
 	/*

@@ -291,23 +300,23 @@ static int btt_log_get_old(struct log_entry *ent)
 	 * the next time, the following logic works out to put this
 	 * (next) entry into [1]
 	 */
-	if (ent[0].seq == 0) {
-		ent[0].seq = cpu_to_le32(1);
+	if (log_seq(log, idx0) == 0) {
+		log->ent[idx0].seq = cpu_to_le32(1);
 		return 0;
 	}
 
-	if (ent[0].seq == ent[1].seq)
+	if (log_seq(log, idx0) == log_seq(log, idx1))
 		return -EINVAL;
-	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
 		return -EINVAL;
 
-	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
-		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+	if (log_seq(log, idx0) < log_seq(log, idx1)) {
+		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
 			old = 0;
 		else
 			old = 1;
 	} else {
-		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
 			old = 1;
 		else
 			old = 0;

@@ -327,17 +336,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 {
 	int ret;
 	int old_ent, ret_ent;
-	struct log_entry log[2];
+	struct log_group log;
 
-	ret = btt_log_read_pair(arena, lane, log);
+	ret = btt_log_group_read(arena, lane, &log);
 	if (ret)
 		return -EIO;
 
-	old_ent = btt_log_get_old(log);
+	old_ent = btt_log_get_old(arena, &log);
 	if (old_ent < 0 || old_ent > 1) {
 		dev_err(to_dev(arena),
 			"log corruption (%d): lane %d seq [%d, %d]\n",
-			old_ent, lane, log[0].seq, log[1].seq);
+			old_ent, lane, log.ent[arena->log_index[0]].seq,
+			log.ent[arena->log_index[1]].seq);
 		/* TODO set error state? */
 		return -EIO;
 	}

@@ -345,7 +355,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 	ret_ent = (old_flag ? old_ent : (1 - old_ent));
 
 	if (ent != NULL)
-		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
 
 	return ret_ent;
 }

@@ -359,17 +369,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
 		u32 sub, struct log_entry *ent, unsigned long flags)
 {
 	int ret;
-	/*
-	 * Ignore the padding in log_entry for calculating log_half.
-	 * The entry is 'committed' when we write the sequence number,
-	 * and we want to ensure that that is the last thing written.
-	 * We don't bother writing the padding as that would be extra
-	 * media wear and write amplification
-	 */
-	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
-	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+	u32 group_slot = arena->log_index[sub];
+	unsigned int log_half = LOG_ENT_SIZE / 2;
 	void *src = ent;
+	u64 ns_off;
 
+	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+			(group_slot * LOG_ENT_SIZE);
 	/* split the 16B write into atomic, durable halves */
 	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
 	if (ret)

@@ -452,7 +458,7 @@ static int btt_log_init(struct arena_info *arena)
 {
 	size_t logsize = arena->info2off - arena->logoff;
 	size_t chunk_size = SZ_4K, offset = 0;
-	struct log_entry log;
+	struct log_entry ent;
 	void *zerobuf;
 	int ret;
 	u32 i;

@@ -484,11 +490,11 @@ static int btt_log_init(struct arena_info *arena)
 	}
 
 	for (i = 0; i < arena->nfree; i++) {
-		log.lba = cpu_to_le32(i);
-		log.old_map = cpu_to_le32(arena->external_nlba + i);
-		log.new_map = cpu_to_le32(arena->external_nlba + i);
-		log.seq = cpu_to_le32(LOG_SEQ_INIT);
-		ret = __btt_log_write(arena, i, 0, &log, 0);
+		ent.lba = cpu_to_le32(i);
+		ent.old_map = cpu_to_le32(arena->external_nlba + i);
+		ent.new_map = cpu_to_le32(arena->external_nlba + i);
+		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+		ret = __btt_log_write(arena, i, 0, &ent, 0);
 		if (ret)
 			goto free;
 	}

@@ -593,6 +599,123 @@ static int btt_freelist_init(struct arena_info *arena)
 	return 0;
 }
 
+static bool ent_is_padding(struct log_entry *ent)
+{
+	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+		&& (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+	bool idx_set = false, initial_state = true;
+	int ret, log_index[2] = {-1, -1};
+	u32 i, j, next_idx = 0;
+	struct log_group log;
+	u32 pad_count = 0;
+
+	for (i = 0; i < arena->nfree; i++) {
+		ret = btt_log_group_read(arena, i, &log);
+		if (ret < 0)
+			return ret;
+
+		for (j = 0; j < 4; j++) {
+			if (!idx_set) {
+				if (ent_is_padding(&log.ent[j])) {
+					pad_count++;
+					continue;
+				} else {
+					/* Skip if index has been recorded */
+					if ((next_idx == 1) &&
+						(j == log_index[0]))
+						continue;
+					/* valid entry, record index */
+					log_index[next_idx] = j;
+					next_idx++;
+				}
+				if (next_idx == 2) {
+					/* two valid entries found */
+					idx_set = true;
+				} else if (next_idx > 2) {
+					/* too many valid indices */
+					return -ENXIO;
+				}
+			} else {
+				/*
+				 * once the indices have been set, just verify
+				 * that all subsequent log groups are either in
+				 * their initial state or follow the same
+				 * indices.
+				 */
+				if (j == log_index[0]) {
+					/* entry must be 'valid' */
+					if (ent_is_padding(&log.ent[j]))
+						return -ENXIO;
+				} else if (j == log_index[1]) {
+					;
+					/*
+					 * log_index[1] can be padding if the
+					 * lane never got used and it is still
+					 * in the initial state (three 'padding'
+					 * entries)
+					 */
+				} else {
+					/* entry must be invalid (padding) */
+					if (!ent_is_padding(&log.ent[j]))
+						return -ENXIO;
+				}
+			}
+		}
+		/*
+		 * If any of the log_groups have more than one valid,
+		 * non-padding entry, then the we are no longer in the
+		 * initial_state
+		 */
+		if (pad_count < 3)
+			initial_state = false;
+		pad_count = 0;
+	}
+
+	if (!initial_state && !idx_set)
+		return -ENXIO;
+
+	/*
+	 * If all the entries in the log were in the initial state,
+	 * assume new padding scheme
+	 */
+	if (initial_state)
+		log_index[1] = 1;
+
+	/*
+	 * Only allow the known permutations of log/padding indices,
+	 * i.e. (0, 1), and (0, 2)
+	 */
+	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+		; /* known index possibilities */
+	else {
+		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+		return -ENXIO;
+	}
+
+	arena->log_index[0] = log_index[0];
+	arena->log_index[1] = log_index[1];
+	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+	return 0;
+}
+
 static int btt_rtt_init(struct arena_info *arena)
 {
 	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);

@@ -649,8 +772,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
 	available -= 2 * BTT_PG_SIZE;
 
 	/* The log takes a fixed amount of space based on nfree */
-	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
-			BTT_PG_SIZE);
+	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
 	available -= logsize;
 
 	/* Calculate optimal split between map and data area */

@@ -667,6 +789,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
 	arena->mapoff = arena->dataoff + datasize;
 	arena->logoff = arena->mapoff + mapsize;
 	arena->info2off = arena->logoff + logsize;
+
+	/* Default log indices are (0,1) */
+	arena->log_index[0] = 0;
+	arena->log_index[1] = 1;
 	return arena;
 }
 

@@ -757,6 +883,13 @@ static int discover_arenas(struct btt *btt)
 		arena->external_lba_start = cur_nlba;
 		parse_arena_meta(arena, super, cur_off);
 
+		ret = log_set_indices(arena);
+		if (ret) {
+			dev_err(to_dev(arena),
+				"Unable to deduce log/padding indices\n");
+			goto out;
+		}
+
 		mutex_init(&arena->err_lock);
 		ret = btt_freelist_init(arena);
 		if (ret)

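The arithmetic behind the BTT rework: a log lane used to be treated as two 32-byte entries (16B payload plus 16B padding each), so slot `sub` of lane `lane` lived at `logoff + (2*lane + sub) * 32`. It is now one 64-byte `log_group` of four 16-byte slots, with the valid-slot indices detected at start-up, giving `logoff + lane*64 + log_index[sub]*16`. A worked check that the two formulas address the same media bytes for the old on-media layout (valid slots at indices 0 and 2):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t logoff = 4096, lane = 3;
	size_t old_ent_size = 32, grp_size = 64, ent_size = 16;
	int log_index[2] = {0, 2};	/* pre-4.15 padding scheme */

	/* old formula: second valid slot (sub == 1) of lane 3 */
	size_t old_off = logoff + (2 * lane + 1) * old_ent_size;
	/* new formula: the same physical slot via the detected index */
	size_t new_off = logoff + lane * grp_size + log_index[1] * ent_size;

	assert(old_off == new_off && old_off == 4096 + 224);
	return 0;
}
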
@@ -27,6 +27,7 @@
 #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
 #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
 #define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
 #define LOG_ENT_SIZE sizeof(struct log_entry)
 #define ARENA_MIN_SIZE (1UL << 24)	/* 16 MB */
 #define ARENA_MAX_SIZE (1ULL << 39)	/* 512 GB */

@@ -50,12 +51,52 @@ enum btt_init_state {
 	INIT_READY
 };
 
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, 'log group' would have actual log entries at
+ * indices (0, 2) and padding at indices (1, 3), where as the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * |       pad       |       pad       |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[(0, 1)] with the detected format.
+ */
+
 struct log_entry {
 	__le32 lba;
 	__le32 old_map;
 	__le32 new_map;
 	__le32 seq;
-	__le64 padding[2];
+};
+
+struct log_group {
+	struct log_entry ent[4];
 };
 
 struct btt_sb {

@@ -125,6 +166,7 @@ struct aligned_lock {
 * @list:		List head for list of arenas
 * @debugfs_dir:	Debugfs dentry
 * @flags:		Arena flags - may signify error states.
+ * @log_index:		Indices of the valid log entries in a log_group
 *
 * arena_info is a per-arena handle. Once an arena is narrowed down for an
 * IO, this struct is passed around for the duration of the IO.

@@ -157,6 +199,7 @@ struct arena_info {
 	/* Arena flags */
 	u32 flags;
 	struct mutex err_lock;
+	int log_index[2];
 };
 
 /**

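The size assumptions baked into `LOG_ENT_SIZE` and `LOG_GRP_SIZE` above (16-byte entries, 64-byte groups) are easy to pin down at compile time. A sketch of checks one might keep next to the definitions, using C11 `_Static_assert` (an assumption on my part; the 4.14 tree itself would more typically use `BUILD_BUG_ON()` inside a function):

/* assumes the post-change struct definitions shown in the hunk above */
_Static_assert(sizeof(struct log_entry) == 16, "log_entry must stay 16 bytes");
_Static_assert(sizeof(struct log_group) == 64, "log_group must stay 64 bytes");
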
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
-	unsigned long align;
 	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
+	unsigned long align, start_pad;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);

@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 
 	align = le32_to_cpu(pfn_sb->align);
 	offset = le64_to_cpu(pfn_sb->dataoff);
+	start_pad = le32_to_cpu(pfn_sb->start_pad);
 	if (align == 0)
 		align = 1UL << ilog2(offset);
 	mode = le32_to_cpu(pfn_sb->mode);

@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -EBUSY;
 	}
 
-	if ((align && !IS_ALIGNED(offset, align))
+	if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
 		dev_err(&nd_pfn->dev,
 				"bad offset: %#llx dax disabled align: %#lx\n",

@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	return altmap;
 }
 
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+	return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+			ALIGN_DOWN(phys, nd_pfn->align));
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;

@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	start = nsio->res.start;
 	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
 	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
+				IORES_DESC_NONE) == REGION_MIXED
+			|| !IS_ALIGNED(start + resource_size(&nsio->res),
+				nd_pfn->align)) {
 		size = resource_size(&nsio->res);
-		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+		end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+				start + size);
 	}
 
 	if (start_pad + end_trunc)
-		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
 				dev_name(&ndns->dev), start_pad + end_trunc);
 
 	/*

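`phys_pmem_align_down()` picks the stricter truncation point: the smaller of the address rounded down to a memory-section boundary and rounded down to the namespace alignment. Worked numbers, assuming the x86_64 128 MB section size and a 2 MB namespace alignment:

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define SECTION_SIZE		(128UL << 20)	/* assumed x86_64 section size */
#define NS_ALIGN		(2UL << 20)	/* assumed nd_pfn->align */

int main(void)
{
	unsigned long end = (256UL << 20) + (3UL << 20);	/* 259 MB */
	unsigned long by_section = ALIGN_DOWN(end, SECTION_SIZE);	/* 256 MB */
	unsigned long by_align = ALIGN_DOWN(end, NS_ALIGN);		/* 258 MB */
	unsigned long pick = by_section < by_align ? by_section : by_align;

	printf("truncate to %lu MB\n", pick >> 20);	/* 256: the min_t() result */
	return 0;
}
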
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 
 	iounmap(base_addr);
 }
+
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card which both don't have any external connectors and
+ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
+ * such makes those machines the only PARISC machines on which we can't use
+ * ttyS0 as boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1292)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+	quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1291)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+	quirk_diva_aux_disable);

@@ -968,7 +968,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_update_current_state(pci_dev, PCI_D0);
+	/*
+	 * pci_restore_state() requires the device to be in D0 (because of MSI
+	 * restoration among other things), so force it into D0 in case the
+	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 */
+	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)

@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 			clear_bit(i, chip->irq_valid_mask);
 	}
 
+	/*
+	 * The same set of machines in chv_no_valid_mask[] have incorrectly
+	 * configured GPIOs that generate spurious interrupts so we use
+	 * this same list to apply another quirk for them.
+	 *
+	 * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+	 */
+	if (!need_valid_mask) {
+		/*
+		 * Mask all interrupts the community is able to generate
+		 * but leave the ones that can only generate GPEs unmasked.
+		 */
+		chv_writel(GENMASK(31, pctrl->community->nirqs),
+			   pctrl->regs + CHV_INTMASK);
+	}
+
 	/* Clear all interrupts */
 	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 

@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN		BIT(5)
 #define A3700_SPI_CLK_PRESCALE		BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK	(0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS		(0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT	28
 #define A3700_SPI_RFIFO_THRS_BIT	24

@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
 	prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+	/* For prescaler values over 15, we can only set it by steps of 2.
+	 * Starting from A3700_SPI_CLK_EVEN_OFFS, we set values from 0 up to
+	 * 30. We only use this range from 16 to 30.
+	 */
+	if (prescale > 15)
+		prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
 	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
 	val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
 

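Worked example of the new prescaler encoding: up to 15 the register field is the divisor itself; above that only even divisors are available, encoded as `0x10 + divisor/2`. With a hypothetical 96 MHz SPI source clock and a 4 MHz target rate:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define CLK_EVEN_OFFS		0x10

int main(void)
{
	unsigned int prescale = DIV_ROUND_UP(96000000, 4000000);	/* 24 */

	if (prescale > 15)
		prescale = CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);

	printf("field = 0x%x\n", prescale);	/* 0x1c: divides by 2 * 12 = 24 */
	return 0;
}
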
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	while (remaining_words) {
 		int n_words, tx_words, rx_words;
 		u32 sr;
+		int stalled;
 
 		n_words = min(remaining_words, xspi->buffer_size);
 

@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
 		/* Read out all the data from the Rx FIFO */
 		rx_words = n_words;
+		stalled = 10;
 		while (rx_words) {
+			if (rx_words == n_words && !(stalled--) &&
+			    !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+			    (sr & XSPI_SR_RX_EMPTY_MASK)) {
+				dev_err(&spi->dev,
+					"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+				xspi_init_hw(xspi);
+				return -EIO;
+			}
+
 			if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
 				xilinx_spi_rx(xspi);
 				rx_words--;

@@ -7,9 +7,10 @@
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
 {
+	return 0;
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)

@@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 struct file;
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 			unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range

@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
 

@@ -504,6 +504,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev) 			\
 do {						\
+	if ((bio)->bi_disk != (bdev)->bd_disk)	\
+		bio_clear_flag(bio, BIO_THROTTLED);\
 	(bio)->bi_disk = (bdev)->bd_disk;	\
 	(bio)->bi_partno = (bdev)->bd_partno;	\
 } while (0)

@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct gendisk		*bi_disk;
-	u8			bi_partno;
-	blk_status_t		bi_status;
 	unsigned int		bi_opf;		/* bottom bits req flags,
 						 * top bits REQ_OP. Use
 						 * accessors.

@@ -59,8 +57,8 @@ struct bio {
 	unsigned short		bi_flags;	/* status, etc and bvec pool number */
 	unsigned short		bi_ioprio;
 	unsigned short		bi_write_hint;
-
-	struct bvec_iter	bi_iter;
+	blk_status_t		bi_status;
+	u8			bi_partno;
 
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.

@@ -74,8 +72,9 @@ struct bio {
 	unsigned int		bi_seg_front_size;
 	unsigned int		bi_seg_back_size;
 
-	atomic_t		__bi_remaining;
+	struct bvec_iter	bi_iter;
 
+	atomic_t		__bi_remaining;
 	bio_end_io_t		*bi_end_io;
 
 	void			*bi_private;

@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
 	struct list_head queuelist;
 	union {
-		call_single_data_t csd;
+		struct __call_single_data csd;
 		u64 fifo_time;
 	};
 

@@ -504,6 +504,8 @@ static void __init mm_init(void)
 	pgtable_init();
 	vmalloc_init();
 	ioremap_huge_init();
+	/* Should be run before the first non-init thread is created */
+	init_espfix_bsp();
 }
 
 asmlinkage __visible void __init start_kernel(void)

@@ -673,10 +675,6 @@ asmlinkage __visible void __init start_kernel(void)
 #ifdef CONFIG_X86
 	if (efi_enabled(EFI_RUNTIME_SERVICES))
 		efi_enter_virtual_mode();
-#endif
-#ifdef CONFIG_X86_ESPFIX64
-	/* Should be run before the first non-init thread is created */
-	init_espfix_bsp();
 #endif
 	thread_stack_cache_init();
 	cred_init();

@@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		goto out;
 	}
 	/* a new mm has just been created */
-	arch_dup_mmap(oldmm, mm);
-	retval = 0;
+	retval = arch_dup_mmap(oldmm, mm);
 out:
 	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);

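Together with the `asm-generic/mm_hooks.h` hunk earlier, this lets an architecture report failure when duplicating its per-mm state on fork (the x86 user in this series clones the LDT), and `dup_mmap()` now propagates that error instead of discarding it. A sketch of what an arch override can do under the new contract, with a hypothetical helper:

#include <linux/errno.h>
#include <linux/mm_types.h>

/* demo_clone_arch_state() is a hypothetical allocation that can fail */
extern int demo_clone_arch_state(struct mm_struct *oldmm, struct mm_struct *mm);

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	if (demo_clone_arch_state(oldmm, mm))
		return -ENOMEM;		/* now surfaces as a fork() failure */
	return 0;
}
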
@@ -1055,6 +1055,7 @@ static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 
 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
 {
+	struct fib6_table *table = rt->rt6i_table;
 	struct rt6_info *pcpu_rt, *prev, **p;
 
 	pcpu_rt = ip6_rt_pcpu_alloc(rt);

@@ -1065,20 +1066,28 @@ static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
 		return net->ipv6.ip6_null_entry;
 	}
 
-	dst_hold(&pcpu_rt->dst);
-	p = this_cpu_ptr(rt->rt6i_pcpu);
-	prev = cmpxchg(p, NULL, pcpu_rt);
-	if (prev) {
-		/* If someone did it before us, return prev instead */
-		/* release refcnt taken by ip6_rt_pcpu_alloc() */
-		dst_release_immediate(&pcpu_rt->dst);
-		/* release refcnt taken by above dst_hold() */
-		dst_release_immediate(&pcpu_rt->dst);
-		dst_hold(&prev->dst);
-		pcpu_rt = prev;
-	}
+	read_lock_bh(&table->tb6_lock);
+	if (rt->rt6i_pcpu) {
+		p = this_cpu_ptr(rt->rt6i_pcpu);
+		prev = cmpxchg(p, NULL, pcpu_rt);
+		if (prev) {
+			/* If someone did it before us, return prev instead */
+			dst_release_immediate(&pcpu_rt->dst);
+			pcpu_rt = prev;
+		}
+	} else {
+		/* rt has been removed from the fib6 tree
+		 * before we have a chance to acquire the read_lock.
+		 * In this case, don't brother to create a pcpu rt
+		 * since rt is going away anyway.  The next
+		 * dst_check() will trigger a re-lookup.
+		 */
+		dst_release_immediate(&pcpu_rt->dst);
+		pcpu_rt = rt;
+	}
+	dst_hold(&pcpu_rt->dst);
 	rt6_dst_from_metrics_check(pcpu_rt);
+	read_unlock_bh(&table->tb6_lock);
 	return pcpu_rt;
 }

@@ -1168,28 +1177,19 @@ redo_rt6_select:
 	if (pcpu_rt) {
 		read_unlock_bh(&table->tb6_lock);
 	} else {
-		/* atomic_inc_not_zero() is needed when using rcu */
-		if (atomic_inc_not_zero(&rt->rt6i_ref)) {
-			/* We have to do the read_unlock first
-			 * because rt6_make_pcpu_route() may trigger
-			 * ip6_dst_gc() which will take the write_lock.
-			 *
-			 * No dst_hold() on rt is needed because grabbing
-			 * rt->rt6i_ref makes sure rt can't be released.
-			 */
-			read_unlock_bh(&table->tb6_lock);
-			pcpu_rt = rt6_make_pcpu_route(rt);
-			rt6_release(rt);
-		} else {
-			/* rt is already removed from tree */
-			read_unlock_bh(&table->tb6_lock);
-			pcpu_rt = net->ipv6.ip6_null_entry;
-			dst_hold(&pcpu_rt->dst);
-		}
+		/* We have to do the read_unlock first
+		 * because rt6_make_pcpu_route() may trigger
+		 * ip6_dst_gc() which will take the write_lock.
+		 */
+		dst_hold(&rt->dst);
+		read_unlock_bh(&table->tb6_lock);
+		pcpu_rt = rt6_make_pcpu_route(rt);
+		dst_release(&rt->dst);
 	}
 
 	trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
 	return pcpu_rt;
 
 }
 }
 EXPORT_SYMBOL_GPL(ip6_pol_route);

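The route hunks replace an `rt6i_ref`-based scheme with the standard refcounting move: pin the object with a reference before dropping the read lock, do the slow work unlocked, then release. Stripped to its skeleton with hypothetical `demo_` types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct kref ref;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

static void demo_use(struct demo_obj *obj, rwlock_t *lock)
{
	read_lock_bh(lock);
	kref_get(&obj->ref);	/* obj can't be freed while we're unlocked */
	read_unlock_bh(lock);

	/* ... slow work that may take the write lock ... */

	kref_put(&obj->ref, demo_release);
}
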
@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
 	return 0;
 }
 
-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+static int __snd_rawmidi_info_select(struct snd_card *card,
+				     struct snd_rawmidi_info *info)
 {
 	struct snd_rawmidi *rmidi;
 	struct snd_rawmidi_str *pstr;
 	struct snd_rawmidi_substream *substream;
 
-	mutex_lock(&register_mutex);
 	rmidi = snd_rawmidi_search(card, info->device);
-	mutex_unlock(&register_mutex);
 	if (!rmidi)
 		return -ENXIO;
 	if (info->stream < 0 || info->stream > 1)

@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
 	}
 	return -ENXIO;
 }
+
+int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
+{
+	int ret;
+
+	mutex_lock(&register_mutex);
+	ret = __snd_rawmidi_info_select(card, info);
+	mutex_unlock(&register_mutex);
+	return ret;
+}
 EXPORT_SYMBOL(snd_rawmidi_info_select);
 
 static int snd_rawmidi_info_select_user(struct snd_card *card,

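The rawmidi change is the standard lock-wrapper refactor: the body moves into a double-underscore helper that assumes the lock is held, and the exported entry point becomes a thin lock/unlock shell. That keeps `register_mutex` held across the whole lookup-and-use sequence (the old code dropped it right after `snd_rawmidi_search()`, leaving the found object unprotected) and gives internal callers a variant to use when they already hold the mutex. The generic shape, with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static int __demo_op(int arg)		/* caller must hold demo_mutex */
{
	/* ... look up and use shared state ... */
	return 0;
}

int demo_op(int arg)
{
	int ret;

	mutex_lock(&demo_mutex);
	ret = __demo_op(arg);
	mutex_unlock(&demo_mutex);
	return ret;
}
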
@@ -55,10 +55,11 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 #define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
 #define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
 				((codec)->core.vendor_id == 0x80862800))
+#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
 #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
 				|| is_skylake(codec) || is_broxton(codec) \
-				|| is_kabylake(codec)) || is_geminilake(codec)
+				|| is_kabylake(codec)) || is_geminilake(codec) \
+				|| is_cannonlake(codec)
 #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
 #define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))

@@ -3841,6 +3842,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI",	patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI",	patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI",	patch_i915_hsw_hdmi),
 HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI",	patch_i915_hsw_hdmi),
+HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",	patch_generic_hdmi),

@@ -5162,6 +5162,22 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
 	}
 }
 
+/* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
+static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+				    const struct hda_fixup *fix, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	static hda_nid_t preferred_pairs[] = {
+		0x21, 0x03, 0x1b, 0x03, 0x16, 0x02,
+		0
+	};
+
+	if (action != HDA_FIXUP_ACT_PRE_PROBE)
+		return;
+
+	spec->gen.preferred_dacs = preferred_pairs;
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 

@@ -5279,6 +5295,8 @@ enum {
 	ALC233_FIXUP_LENOVO_MULTI_CODECS,
 	ALC294_FIXUP_LENOVO_MIC_LOCATION,
 	ALC700_FIXUP_INTEL_REFERENCE,
+	ALC274_FIXUP_DELL_BIND_DACS,
+	ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {

@@ -6089,6 +6107,21 @@ static const struct hda_fixup alc269_fixups[] = {
 		{}
 		}
 	},
+	[ALC274_FIXUP_DELL_BIND_DACS] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc274_fixup_bind_dacs,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
+	[ALC274_FIXUP_DELL_AIO_LINEOUT_VERB] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1b, 0x0401102f },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC274_FIXUP_DELL_BIND_DACS
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {

@@ -6550,7 +6583,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x14, 0x90170110},
 		{0x1b, 0x90a70130},
 		{0x21, 0x03211020}),
-	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+	SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 		{0x12, 0xb7a60130},
 		{0x13, 0xb8a61140},
 		{0x16, 0x90170110},

@@ -2173,20 +2173,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	kctl->private_value = (unsigned long)namelist;
 	kctl->private_free = usb_mixer_selector_elem_free;
 
-	nameid = uac_selector_unit_iSelector(desc);
+	/* check the static mapping table at first */
 	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
-	if (len)
-		;
-	else if (nameid)
-		len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
-					 sizeof(kctl->id.name));
-	else
-		len = get_term_name(state, &state->oterm,
-				    kctl->id.name, sizeof(kctl->id.name), 0);
-
-	if (!len) {
-		strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+	if (!len) {
+		/* no mapping ? */
+		/* if iSelector is given, use it */
+		nameid = uac_selector_unit_iSelector(desc);
+		if (nameid)
+			len = snd_usb_copy_string_desc(state, nameid,
+						       kctl->id.name,
+						       sizeof(kctl->id.name));
+		/* ... or pick up the terminal name at next */
+		if (!len)
+			len = get_term_name(state, &state->oterm,
+				    kctl->id.name, sizeof(kctl->id.name), 0);
+		/* ... or use the fixed string "USB" as the last resort */
+		if (!len)
+			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+	}
 
+	/* and add the proper suffix */
 	if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
 		append_ctl_name(kctl, " Clock Source");
 	else if ((state->oterm.type & 0xff00) == 0x0100)

@@ -1172,10 +1172,11 @@ static bool is_marantz_denon_dac(unsigned int id)
 /* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
 * between PCM/DOP and native DSD mode
 */
-static bool is_teac_50X_dac(unsigned int id)
+static bool is_teac_dsd_dac(unsigned int id)
 {
 	switch (id) {
 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+	case USB_ID(0x0644, 0x8044): /* Esoteric D-05X */
 		return true;
 	}
 	return false;

@@ -1208,7 +1209,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
 			break;
 		}
 		mdelay(20);
-	} else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
 		/* Vendor mode switch cmd is required. */
 		switch (fmt->altsetting) {
 		case 3: /* DSD mode (DSD_U32) requested */

@@ -1398,7 +1399,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 	}
 
 	/* TEAC devices with USB DAC functionality */
-	if (is_teac_50X_dac(chip->usb_id)) {
+	if (is_teac_dsd_dac(chip->usb_id)) {
 		if (fp->altsetting == 3)
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}

kernel/tools/objtool/.gitignore (vendored, 2 changes)

@@ -1,3 +1,3 @@
-arch/x86/insn/inat-tables.c
+arch/x86/lib/inat-tables.c
 objtool
 fixdep

@ -7,8 +7,10 @@ ARCH := x86
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# always use the host compiler
|
# always use the host compiler
|
||||||
CC = gcc
|
HOSTCC ?= gcc
|
||||||
LD = ld
|
HOSTLD ?= ld
|
||||||
|
CC = $(HOSTCC)
|
||||||
|
LD = $(HOSTLD)
|
||||||
AR = ar
|
AR = ar
|
||||||
|
|
||||||
ifeq ($(srctree),)
|
ifeq ($(srctree),)
|
||||||
|
@@ -25,7 +27,9 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+INCLUDES := -I$(srctree)/tools/include \
+	    -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+	    -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
 CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS += -lelf $(LIBSUBCMD)
@@ -41,22 +45,8 @@ include $(srctree)/tools/build/Makefile.include
 $(OBJTOOL_IN): fixdep FORCE
 	@$(MAKE) $(build)=objtool
 
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-	@(diff -I 2>&1 | grep -q 'option requires an argument' && \
-	test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-	diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
-	diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
-	diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
-	diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
-	diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
-	|| echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
-	@(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
-	diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
-	|| echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
+	@./sync-check.sh
 	$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
 
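The long inline recipe moves into sync-check.sh, whose contents are not part of this diff. The following is therefore only a sketch of the pattern it replaces, assuming the post-move file layout: compare objtool's private copies of the x86 decoder sources against the kernel's canonical ones and warn, without ever failing the build, when they drift apart (the old recipe's trailing || true behavior).

#!/bin/sh
# Illustrative sync-check sketch, run from tools/objtool; file list abbreviated.
FILES="
arch/x86/lib/inat.c
arch/x86/lib/insn.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/include/asm/inat.h
arch/x86/include/asm/insn.h
"

for f in $FILES; do
	# ../../ points at the kernel tree root from tools/objtool.
	diff "$f" "../../$f" >/dev/null 2>&1 ||
		echo "warning: objtool: $f differs from kernel" >&2
done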
@@ -66,7 +56,7 @@ $(LIBSUBCMD): fixdep FORCE
 clean:
 	$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
 	$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
-	$(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+	$(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
 
 FORCE:
kernel/tools/objtool/arch/x86/Build
@@ -1,12 +1,12 @@
 objtool-y += decode.o
 
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
 
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
 	$(call rule_mkdir)
 	$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
 
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
 
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
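Run by hand from tools/objtool, the generation rule above reduces to a single awk invocation (assuming $(AWK) is plain awk and no separate $(OUTPUT) directory); the paths are the post-rename ones from this hunk:

# Regenerate the instruction-attribute lookup tables from the opcode map.
awk -f arch/x86/tools/gen-insn-attr-x86.awk \
	arch/x86/lib/x86-opcode-map.txt > arch/x86/lib/inat-tables.c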
kernel/tools/objtool/arch/x86/decode.c
@@ -19,9 +19,9 @@
 #include <stdlib.h>
 
 #define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
 
 #include "../../elf.h"
 #include "../../arch.h"
@@ -20,7 +20,7 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#include "inat_types.h"
+#include <asm/inat_types.h>
 
 /*
  * Internal bits. Don't use bitmasks directly, because these bits are
@@ -97,6 +97,16 @@
 #define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)
 #define INAT_MAKE_IMM(imm)	(imm << INAT_IMM_OFFS)
 
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE	0
+#define INAT_SEG_REG_DEFAULT	1
+#define INAT_SEG_REG_CS		2
+#define INAT_SEG_REG_SS		3
+#define INAT_SEG_REG_DS		4
+#define INAT_SEG_REG_ES		5
+#define INAT_SEG_REG_FS		6
+#define INAT_SEG_REG_GS		7
+
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
 extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
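The new identifiers give the decoder a compact way to name which segment register applies to an instruction's memory operand. As a purely hypothetical illustration (this helper is not part of the patch), the classic x86 segment-override prefix bytes would map onto them like this:

#include <stdio.h>

/* Values copied from the hunk above. */
#define INAT_SEG_REG_IGNORE	0
#define INAT_SEG_REG_DEFAULT	1
#define INAT_SEG_REG_CS		2
#define INAT_SEG_REG_SS		3
#define INAT_SEG_REG_DS		4
#define INAT_SEG_REG_ES		5
#define INAT_SEG_REG_FS		6
#define INAT_SEG_REG_GS		7

/* Hypothetical example, not kernel code: map an x86 segment-override
 * prefix byte to the INAT_SEG_REG_* identifier it selects. */
static int prefix_to_seg_reg(unsigned char pfx)
{
	switch (pfx) {
	case 0x2e: return INAT_SEG_REG_CS;
	case 0x36: return INAT_SEG_REG_SS;
	case 0x3e: return INAT_SEG_REG_DS;
	case 0x26: return INAT_SEG_REG_ES;
	case 0x64: return INAT_SEG_REG_FS;
	case 0x65: return INAT_SEG_REG_GS;
	default:   return INAT_SEG_REG_DEFAULT; /* no override prefix */
	}
}

int main(void)
{
	printf("%d\n", prefix_to_seg_reg(0x65)); /* 7 == INAT_SEG_REG_GS */
	return 0;
}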
@@ -21,7 +21,7 @@
  */
 
 /* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include <asm/inat.h>
 
 struct insn_field {
 	union {
kernel/tools/objtool/arch/x86/insn/inat-tables.c (normal file, 1392 lines; diff suppressed because it is too large)
@@ -18,7 +18,7 @@
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
-#include "insn.h"
+#include <asm/insn.h>
 
 /* Attribute tables are generated from opcode map */
 #include "inat-tables.c"
Some files were not shown because too many files have changed in this diff.