updating to mainline 4.12.10

Jake Day 2017-08-30 13:21:01 -04:00
parent 25414d1b87
commit 3969fece56
110 changed files with 863 additions and 358 deletions

View file

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 12
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = Fearless Coyote

View file

@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_FLUSH 0x904 #define ARC_REG_SLC_FLUSH 0x904
#define ARC_REG_SLC_INVALIDATE 0x905 #define ARC_REG_SLC_INVALIDATE 0x905
#define ARC_REG_SLC_RGN_START 0x914 #define ARC_REG_SLC_RGN_START 0x914
#define ARC_REG_SLC_RGN_START1 0x915
#define ARC_REG_SLC_RGN_END 0x916 #define ARC_REG_SLC_RGN_END 0x916
#define ARC_REG_SLC_RGN_END1 0x917
/* Bit val in SLC_CONTROL */ /* Bit val in SLC_CONTROL */
#define SLC_CTRL_DIS 0x001 #define SLC_CTRL_DIS 0x001

View file

@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
return IS_ENABLED(CONFIG_ARC_HAS_PAE40); return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
} }
extern int pae40_exist_but_not_enab(void);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif #endif

View file

@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
static DEFINE_SPINLOCK(lock); static DEFINE_SPINLOCK(lock);
unsigned long flags; unsigned long flags;
unsigned int ctrl; unsigned int ctrl;
phys_addr_t end;
spin_lock_irqsave(&lock, flags); spin_lock_irqsave(&lock, flags);
@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
* END needs to be setup before START (latter triggers the operation) * END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz * END can't be same as START, so add (l2_line_sz - 1) to sz
*/ */
write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); end = paddr + sz + l2_line_sz - 1;
write_aux_reg(ARC_REG_SLC_RGN_START, paddr); if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
read_aux_reg(ARC_REG_SLC_CTRL);
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
__dc_enable(); __dc_enable();
} }
/*
* Cache related boot time checks/setups only needed on master CPU:
* - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
* Assume SMP only, so all cores will have same cache config. A check on
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
void __init arc_cache_init_master(void) void __init arc_cache_init_master(void)
{ {
unsigned int __maybe_unused cpu = smp_processor_id(); unsigned int __maybe_unused cpu = smp_processor_id();
@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
printk(arc_cache_mumbojumbo(0, str, sizeof(str))); printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
/*
* Only master CPU needs to execute rest of function:
* - Assume SMP so all cores will have same cache config so
* any geometry checks will be same for all
* - IOC setup / dma callbacks only need to be setup once
*/
if (!cpu) if (!cpu)
arc_cache_init_master(); arc_cache_init_master();
/*
* In PAE regime, TLB and cache maintenance ops take wider addresses
* And even if PAE is not enabled in kernel, the upper 32-bits still need
* to be zeroed to keep the ops sane.
* As an optimization for more common !PAE enabled case, zero them out
* once at init, rather than checking/setting to 0 for every runtime op
*/
if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
if (l2_line_sz) {
write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
}
}
} }
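With PAE40 the SLC region registers take a 40-bit physical address split across two 32-bit registers, which is what the upper_32_bits()/lower_32_bits() calls in the hunk above do. A stand-alone sketch of that split, using user-space stand-ins for the kernel helpers (the address below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's upper_32_bits()/lower_32_bits(). */
#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))
#define lower_32_bits(n) ((uint32_t)((uint64_t)(n) & 0xffffffffUL))

int main(void)
{
	uint64_t paddr = 0x1234567890ULL;	/* a 40-bit physical address */

	/* hi would go to ARC_REG_SLC_RGN_START1, lo to ARC_REG_SLC_RGN_START */
	printf("hi = 0x%08x, lo = 0x%08x\n",
	       upper_32_bits(paddr), lower_32_bits(paddr));
	return 0;
}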

View file

@ -104,6 +104,8 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */ /* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE; DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static int __read_mostly pae_exists;
/* /*
* Utility Routine to erase a J-TLB entry * Utility Routine to erase a J-TLB entry
* Caller needs to setup Index Reg (manually or via getIndex) * Caller needs to setup Index Reg (manually or via getIndex)
@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
mmu->u_dtlb = mmu4->u_dtlb * 4; mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4; mmu->u_itlb = mmu4->u_itlb * 4;
mmu->sasid = mmu4->sasid; mmu->sasid = mmu4->sasid;
mmu->pae = mmu4->pae; pae_exists = mmu->pae = mmu4->pae;
} }
} }
@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
return buf; return buf;
} }
int pae40_exist_but_not_enab(void)
{
return pae_exists && !is_pae40_enabled();
}
void arc_mmu_init(void) void arc_mmu_init(void)
{ {
char str[256]; char str[256];
@ -859,6 +866,9 @@ void arc_mmu_init(void)
/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif #endif
if (pae40_exist_but_not_enab())
write_aux_reg(ARC_REG_TLBPD1HI, 0);
} }
/* /*

View file

@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
{ {
if (!system_supports_fpsimd()) if (!system_supports_fpsimd())
return; return;
preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current); fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE); set_thread_flag(TIF_FOREIGN_FPSTATE);
preempt_enable();
} }
/* /*

View file

@ -80,9 +80,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
struct task_struct *tsk) struct task_struct *tsk)
{ {
/* Mark this context has been used on the new CPU */ /* Mark this context has been used on the new CPU */
if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
/*
* This full barrier orders the store to the cpumask above vs
* a subsequent operation which allows this CPU to begin loading
* translations for next.
*
* When using the radix MMU that operation is the load of the
* MMU context id, which is then moved to SPRN_PID.
*
* For the hash MMU it is either the first load from slb_cache
* in switch_slb(), and/or the store of paca->mm_ctx_id in
* copy_mm_to_paca().
*
* On the read side the barrier is in pte_xchg(), which orders
* the store to the PTE vs the load of mm_cpumask.
*/
smp_mb();
}
/* 32-bit keeps track of the current PGDIR in the thread struct */ /* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32 #ifdef CONFIG_PPC32
tsk->thread.pgdir = next->pgd; tsk->thread.pgdir = next->pgd;
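The comment added above describes a store-buffering hazard: the store into mm_cpumask and the later load that lets this CPU start using translations for next must not be reordered, with pte_xchg() providing the pairing barrier on the fault side. A user-space sketch of that pattern, with __atomic_thread_fence() standing in for smp_mb() (illustration only, not kernel code):

#include <pthread.h>
#include <stdio.h>

static int cpumask_bit, pte_store;
static int r_switcher, r_writer;

static void *switcher(void *arg)
{
	(void)arg;
	__atomic_store_n(&cpumask_bit, 1, __ATOMIC_RELAXED);	/* cpumask_set_cpu() */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);		/* the added smp_mb() */
	r_switcher = __atomic_load_n(&pte_store, __ATOMIC_RELAXED); /* later load */
	return NULL;
}

static void *writer(void *arg)
{
	(void)arg;
	__atomic_store_n(&pte_store, 1, __ATOMIC_RELAXED);	/* store in pte_xchg() */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);		/* barrier in pte_xchg() */
	r_writer = __atomic_load_n(&cpumask_bit, __ATOMIC_RELAXED); /* mm_cpumask load */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, switcher, NULL);
	pthread_create(&b, NULL, writer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With both fences in place, both results being 0 cannot happen. */
	printf("switcher saw %d, writer saw %d\n", r_switcher, r_writer);
	return 0;
}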

View file

@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
unsigned long *p = (unsigned long *)ptep; unsigned long *p = (unsigned long *)ptep;
__be64 prev; __be64 prev;
/* See comment in switch_mm_irqs_off() */
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old), prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
(__force unsigned long)pte_raw(new)); (__force unsigned long)pte_raw(new));

View file

@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{ {
unsigned long *p = (unsigned long *)ptep; unsigned long *p = (unsigned long *)ptep;
/* See comment in switch_mm_irqs_off() */
return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new)); return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
} }
#endif #endif

View file

@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
"srl %[cc],28\n" "srl %[cc],28\n"
: [cc] "=d" (cc) : [cc] "=d" (cc)
: [code] "d" (code), [addr] "a" (addr) : [code] "d" (code), [addr] "a" (addr)
: "memory", "cc"); : "3", "memory", "cc");
return cc; return cc;
} }
@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr); VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
trace_kvm_s390_handle_sthyi(vcpu, code, addr); trace_kvm_s390_handle_sthyi(vcpu, code, addr);
if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK) if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (code & 0xffff) { if (code & 0xffff) {
@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out; goto out;
} }
if (addr & ~PAGE_MASK)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* /*
* If the page has not yet been faulted in, we want to do that * If the page has not yet been faulted in, we want to do that
* now and not after all the expensive calculations. * now and not after all the expensive calculations.

View file

@ -1241,8 +1241,6 @@ static int pci_sun4v_probe(struct platform_device *op)
* ATU group, but ATU hcalls won't be available. * ATU group, but ATU hcalls won't be available.
*/ */
hv_atu = false; hv_atu = false;
pr_err(PFX "Could not register hvapi ATU err=%d\n",
err);
} else { } else {
pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
vatu_major, vatu_minor); vatu_major, vatu_minor);

View file

@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
return 0; return 0;
} }
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate) static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{ {
if (use_xsave()) { if (use_xsave()) {
copy_kernel_to_xregs(&fpstate->xsave, -1); copy_kernel_to_xregs(&fpstate->xsave, mask);
} else { } else {
if (use_fxsr()) if (use_fxsr())
copy_kernel_to_fxregs(&fpstate->fxsave); copy_kernel_to_fxregs(&fpstate->fxsave);
@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
: : [addr] "m" (fpstate)); : : [addr] "m" (fpstate));
} }
__copy_kernel_to_fpregs(fpstate); __copy_kernel_to_fpregs(fpstate, -1);
} }
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

View file

@ -486,6 +486,7 @@ struct kvm_vcpu_arch {
unsigned long cr4; unsigned long cr4;
unsigned long cr4_guest_owned_bits; unsigned long cr4_guest_owned_bits;
unsigned long cr8; unsigned long cr8;
u32 pkru;
u32 hflags; u32 hflags;
u64 efer; u64 efer;
u64 apic_base; u64 apic_base;

View file

@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1; mm->context.execute_only_pkey = -1;
} }
#endif #endif
init_new_context_ldt(tsk, mm); return init_new_context_ldt(tsk, mm);
return 0;
} }
static inline void destroy_context(struct mm_struct *mm) static inline void destroy_context(struct mm_struct *mm)
{ {

View file

@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
cpuid_mask(&entry->ecx, CPUID_7_ECX); cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* PKU is not yet implemented for shadow paging. */ /* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled) if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU); entry->ecx &= ~F(PKU);
entry->edx &= kvm_cpuid_7_0_edx_x86_features; entry->edx &= kvm_cpuid_7_0_edx_x86_features;
entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);

View file

@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
} }
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops->get_pkru(vcpu);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu) static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{ {
vcpu->arch.hflags |= HF_GUEST_MASK; vcpu->arch.hflags |= HF_GUEST_MASK;

View file

@ -182,7 +182,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
* index of the protection domain, so pte_pkey * 2 is * index of the protection domain, so pte_pkey * 2 is
* is the index of the first bit for the domain. * is the index of the first bit for the domain.
*/ */
pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
offset = (pfec & ~1) + offset = (pfec & ~1) +

View file

@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags; to_svm(vcpu)->vmcb->save.rflags = rflags;
} }
static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
{
return 0;
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{ {
switch (reg) { switch (reg) {
@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_rflags = svm_get_rflags, .get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags, .set_rflags = svm_set_rflags,
.get_pkru = svm_get_pkru,
.tlb_flush = svm_flush_tlb, .tlb_flush = svm_flush_tlb,
.run = svm_vcpu_run, .run = svm_vcpu_run,

View file

@ -636,8 +636,6 @@ struct vcpu_vmx {
u64 current_tsc_ratio; u64 current_tsc_ratio;
bool guest_pkru_valid;
u32 guest_pkru;
u32 host_pkru; u32 host_pkru;
/* /*
@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmcs_writel(GUEST_RFLAGS, rflags); vmcs_writel(GUEST_RFLAGS, rflags);
} }
static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->guest_pkru;
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{ {
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@ -8860,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0); vmx_set_interrupt_shadow(vcpu, 0);
if (vmx->guest_pkru_valid) if (static_cpu_has(X86_FEATURE_PKU) &&
__write_pkru(vmx->guest_pkru); kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vcpu->arch.pkru);
atomic_switch_perf_msrs(vmx); atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr(); debugctlmsr = get_debugctlmsr();
@ -9009,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* back on host, so it is safe to read guest PKRU from current * back on host, so it is safe to read guest PKRU from current
* XSAVE. * XSAVE.
*/ */
if (boot_cpu_has(X86_FEATURE_OSPKE)) { if (static_cpu_has(X86_FEATURE_PKU) &&
vmx->guest_pkru = __read_pkru(); kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
if (vmx->guest_pkru != vmx->host_pkru) { vcpu->arch.pkru = __read_pkru();
vmx->guest_pkru_valid = true; if (vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vmx->host_pkru); __write_pkru(vmx->host_pkru);
} else
vmx->guest_pkru_valid = false;
} }
/* /*
@ -11507,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_rflags = vmx_get_rflags, .get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags, .set_rflags = vmx_set_rflags,
.get_pkru = vmx_get_pkru,
.tlb_flush = vmx_flush_tlb, .tlb_flush = vmx_flush_tlb,
.run = vmx_vcpu_run, .run = vmx_vcpu_run,

View file

@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
u32 size, offset, ecx, edx; u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index, cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx); &size, &offset, &ecx, &edx);
memcpy(dest + offset, src, size); if (feature == XFEATURE_MASK_PKRU)
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
memcpy(dest + offset, src, size);
} }
valid -= feature; valid -= feature;
@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
u32 size, offset, ecx, edx; u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index, cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx); &size, &offset, &ecx, &edx);
memcpy(dest, src + offset, size); if (feature == XFEATURE_MASK_PKRU)
memcpy(&vcpu->arch.pkru, src + offset,
sizeof(vcpu->arch.pkru));
else
memcpy(dest, src + offset, size);
} }
valid -= feature; valid -= feature;
@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/ */
vcpu->guest_fpu_loaded = 1; vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin(); __kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); /* PKRU is separately restored in kvm_x86_ops->run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
~XFEATURE_MASK_PKRU);
trace_kvm_fpu(1); trace_kvm_fpu(1);
} }

View file

@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) }, { "CAV900D", APD_ADDR(vulcan_spi_desc) },
{ "HISI0A21", APD_ADDR(hip07_i2c_desc) }, { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI0A22", APD_ADDR(hip08_i2c_desc) }, { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
#endif #endif
{ } { }
}; };

View file

@ -1703,7 +1703,7 @@ error:
* functioning ECDT EC first in order to handle the events. * functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/ */
int __init acpi_ec_ecdt_start(void) static int __init acpi_ec_ecdt_start(void)
{ {
acpi_handle handle; acpi_handle handle;
@ -1906,20 +1906,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void) int __init acpi_ec_init(void)
{ {
int result; int result;
int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */ /* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init(); result = acpi_ec_query_init();
if (result) if (result)
goto err_exit; return result;
/* Now register the driver for the EC */
result = acpi_bus_register_driver(&acpi_ec_driver);
if (result)
goto err_exit;
err_exit: /* Drivers must be started after acpi_ec_query_init() */
if (result) ecdt_fail = acpi_ec_ecdt_start();
acpi_ec_query_exit(); dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
return result; return ecdt_fail && dsdt_fail ? -ENODEV : 0;
} }
/* EC driver currently not unloadable */ /* EC driver currently not unloadable */

View file

@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void); int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void); int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void); int acpi_ec_dsdt_probe(void);
int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void); void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void); void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,

View file

@ -1046,7 +1046,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
fwnode_for_each_child_node(fwnode, child) { fwnode_for_each_child_node(fwnode, child) {
u32 nr; u32 nr;
if (!fwnode_property_read_u32(fwnode, prop_name, &nr)) if (fwnode_property_read_u32(child, prop_name, &nr))
continue; continue;
if (val == nr) if (val == nr)

View file

@ -2085,7 +2085,6 @@ int __init acpi_scan_init(void)
acpi_gpe_apply_masked_gpes(); acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes(); acpi_update_all_gpes();
acpi_ec_ecdt_start();
acpi_scan_initialized = true; acpi_scan_initialized = true;

View file

@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
list_add_tail(&t->work.entry, target_list); list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo); list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait) if (target_wait) {
wake_up_interruptible(target_wait); if (reply || !(t->flags & TF_ONE_WAY))
wake_up_interruptible_sync(target_wait);
else
wake_up_interruptible(target_wait);
}
return; return;
err_translate_failed: err_translate_failed:
@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n", /*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/ proc->pid, current->pid, cmd, arg);*/
if (unlikely(current->mm != proc->vma_vm_mm)) {
pr_err("current mm mismatch proc mm\n");
return -EINVAL;
}
trace_binder_ioctl(cmd, arg); trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
const char *failure_string; const char *failure_string;
struct binder_buffer *buffer; struct binder_buffer *buffer;
if (proc->tsk != current) if (proc->tsk != current->group_leader)
return -EINVAL; return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M) if ((vma->vm_end - vma->vm_start) > SZ_4M)
@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL); proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL) if (proc == NULL)
return -ENOMEM; return -ENOMEM;
get_task_struct(current); get_task_struct(current->group_leader);
proc->tsk = current; proc->tsk = current->group_leader;
proc->vma_vm_mm = current->mm;
INIT_LIST_HEAD(&proc->todo); INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait); init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current); proc->default_priority = task_nice(current);

View file

@ -839,8 +839,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info = info->mode_info; mode_info = info->mode_info;
if (mode_info) { if (mode_info) {
/* if the displays are off, vblank time is max */
mode_info->vblank_time_us = 0xffffffff;
/* always set the reference clock */ /* always set the reference clock */
mode_info->ref_clock = adev->clock.spll.reference_freq; mode_info->ref_clock = adev->clock.spll.reference_freq;
} }

View file

@ -1581,6 +1581,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check) if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state); ret = config->funcs->atomic_check(state->dev, state);
if (ret)
return ret;
if (!state->allow_modeset) { if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) { if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@ -1591,7 +1594,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
} }
} }
return ret; return 0;
} }
EXPORT_SYMBOL(drm_atomic_check_only); EXPORT_SYMBOL(drm_atomic_check_only);
@ -2093,10 +2096,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state; struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx; struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane; struct drm_plane *plane;
struct drm_out_fence_state *fence_state = NULL; struct drm_out_fence_state *fence_state;
unsigned plane_mask; unsigned plane_mask;
int ret = 0; int ret = 0;
unsigned int i, j, num_fences = 0; unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */ /* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@ -2137,6 +2140,8 @@ retry:
plane_mask = 0; plane_mask = 0;
copied_objs = 0; copied_objs = 0;
copied_props = 0; copied_props = 0;
fence_state = NULL;
num_fences = 0;
for (i = 0; i < arg->count_objs; i++) { for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props; uint32_t obj_id, count_props;

View file

@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr; struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->dev;
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME)) if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv); drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_put_unlocked(obj); drm_gem_object_handle_put_unlocked(obj);
return 0; return 0;

View file

@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, plane_req->crtc_id); crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) { if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n", DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id); plane_req->crtc_id);
return -ENOENT; return -ENOENT;

View file

@ -2754,7 +2754,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unmap_src: unmap_src:
i915_gem_object_unpin_map(obj); i915_gem_object_unpin_map(obj);
put_obj: put_obj:
i915_gem_object_put(wa_ctx->indirect_ctx.obj); i915_gem_object_put(obj);
return ret; return ret;
} }

View file

@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin; uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field, /* Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port and abort if more * so look for all the possible values for each port.
* than one is found. */ */
int dvo_ports[][3] = { int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
}; };
/* Find the child device to use, abort if more than one found. */ /*
* Find the first child device to reference the port, report if more
* than one found.
*/
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i; it = dev_priv->vbt.child_dev + i;
@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (it->common.dvo_port == dvo_ports[port][j]) { if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) { if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port)); port_name(port));
return; } else {
child = it;
} }
child = it;
} }
} }
} }

View file

@ -25,12 +25,20 @@
#include "sun4i_framebuffer.h" #include "sun4i_framebuffer.h"
#include "sun4i_tcon.h" #include "sun4i_tcon.h"
static void sun4i_drv_lastclose(struct drm_device *dev)
{
struct sun4i_drv *drv = dev->dev_private;
drm_fbdev_cma_restore_mode(drv->fbdev);
}
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = { static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */ /* Generic Operations */
.lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops, .fops = &sun4i_drv_fops,
.name = "sun4i-drm", .name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine", .desc = "Allwinner sun4i Display Engine",

View file

@ -392,7 +392,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif #endif
#ifdef CONFIG_PM #ifdef CONFIG_PM
static int dw_i2c_plat_suspend(struct device *dev) static int dw_i2c_plat_runtime_suspend(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@ -414,11 +414,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0; return 0;
} }
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_suspend(struct device *dev)
{
pm_runtime_resume(dev);
return dw_i2c_plat_runtime_suspend(dev);
}
#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = { static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare, .prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete, .complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
dw_i2c_plat_resume,
NULL)
}; };
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)

View file

@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0; s32 poll_value = 0;
if (state) { if (state) {
if (!atomic_read(&st->user_requested_state))
return 0;
if (sensor_hub_device_open(st->hsdev)) if (sensor_hub_device_open(st->hsdev))
return -EIO; return -EIO;
@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val); &report_val);
} }
pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
st->pdev->name, state_val, report_val);
sensor_hub_get_feature(st->hsdev, st->power_state.report_id, sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index, st->power_state.index,
sizeof(state_val), &state_val); sizeof(state_val), &state_val);
@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev); ret = pm_runtime_get_sync(&st->pdev->dev);
else { else {
pm_runtime_mark_last_busy(&st->pdev->dev); pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev);
} }
if (ret < 0) { if (ret < 0) {
@ -205,8 +207,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */ /* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000); 3000);
pm_runtime_use_autosuspend(&attrb->pdev->dev);
return ret; return ret;
error_unreg_trigger: error_unreg_trigger:
iio_trigger_unregister(trig); iio_trigger_unregister(trig);

View file

@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500), .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450, .gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500), .accel_max_val = IIO_M_S_2_TO_G(12500),
.accel_max_scale = 5, .accel_max_scale = 10,
}, },
[ADIS16485] = { [ADIS16485] = {
.channels = adis16485_channels, .channels = adis16485_channels,

View file

@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = { .drdy_irq = {
.addr = 0x62, .addr = 0x62,
.mask_int1 = 0x01, .mask_int1 = 0x01,
.addr_ihl = 0x63, .addr_stat_drdy = 0x67,
.mask_ihl = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
}, },
.multi_read_bit = false, .multi_read_bit = false,
.bootime = 2, .bootime = 2,

View file

@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
cq->uobject = &obj->uobject; cq->uobject = &obj->uobject;
cq->comp_handler = ib_uverbs_comp_handler; cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler; cq->event_handler = ib_uverbs_cq_event_handler;
cq->cq_context = &ev_file->ev_queue; cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0); atomic_set(&cq->usecnt, 0);
obj->uobject.object = cq; obj->uobject.object = cq;

View file

@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_TWO: case SS4_PACKET_ID_TWO:
if (priv->flags & ALPS_BUTTONPAD) { if (priv->flags & ALPS_BUTTONPAD) {
f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
}
f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
} else { } else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
}
f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
} }
f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0; f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI: case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) { if (priv->flags & ALPS_BUTTONPAD) {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
} else {
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
}
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX_BL; no_data_x = SS4_MFPACKET_NO_AX_BL;
no_data_y = SS4_MFPACKET_NO_AY_BL; no_data_y = SS4_MFPACKET_NO_AY_BL;
} else { } else {
f->mt[2].x = SS4_STD_MF_X_V2(p, 0); if (IS_SS4PLUS_DEV(priv->dev_id)) {
f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
} else {
f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
}
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX; no_data_x = SS4_MFPACKET_NO_AX;
no_data_y = SS4_MFPACKET_NO_AY; no_data_y = SS4_MFPACKET_NO_AY;
@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
memset(otp, 0, sizeof(otp)); memset(otp, 0, sizeof(otp));
if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
return -1; return -1;
alps_update_device_area_ss4_v2(otp, priv); alps_update_device_area_ss4_v2(otp, priv);

View file

@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
((_b[1 + _i * 3] << 5) & 0x1F00) \ ((_b[1 + _i * 3] << 5) & 0x1F00) \
) )
#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
((_b[1 + (_i) * 3] << 4) & 0x0F80) \
)
#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
((_b[2 + (_i) * 3] << 4) & 0x0E00) \ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \
@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
((_b[0 + (_i) * 3] >> 3) & 0x0010) \ ((_b[0 + (_i) * 3] >> 3) & 0x0010) \
) )
#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 4) & 0x0008) \
)
#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 3) & 0x0008) \ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \
) )

View file

@ -1223,6 +1223,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 }, { "ELAN0000", 0 },
{ "ELAN0100", 0 }, { "ELAN0100", 0 },
{ "ELAN0600", 0 }, { "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 }, { "ELAN0605", 0 },
{ "ELAN0608", 0 }, { "ELAN0608", 0 },
{ "ELAN0605", 0 }, { "ELAN0605", 0 },

View file

@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1; return -1;
if (param[0] != TP_MAGIC_IDENT) /* add new TP ID. */
if (!(param[0] & TP_MAGIC_IDENT))
return -1; return -1;
if (firmware_id) if (firmware_id)

View file

@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */ #define TP_READ_ID 0xE1 /* Sent for device identification */
#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ #define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */ /* by the firmware ID */
/* Firmware ID includes 0x1, 0x2, 0x3 */
/* /*

View file

@ -571,7 +571,9 @@ struct amd_iommu {
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{ {
return container_of(dev, struct amd_iommu, iommu.dev); struct iommu_device *iommu = dev_to_iommu_device(dev);
return container_of(iommu, struct amd_iommu, iommu);
} }
#define ACPIHID_UID_LEN 256 #define ACPIHID_UID_LEN 256

View file

@ -4749,7 +4749,9 @@ static void intel_disable_iommus(void)
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{ {
return container_of(dev, struct intel_iommu, iommu.dev); struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
return container_of(iommu_dev, struct intel_iommu, iommu);
} }
static ssize_t intel_iommu_show_version(struct device *dev, static ssize_t intel_iommu_show_version(struct device *dev,

View file

@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
va_list vargs; va_list vargs;
int ret; int ret;
device_initialize(&iommu->dev); iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
if (!iommu->dev)
return -ENOMEM;
iommu->dev.class = &iommu_class; device_initialize(iommu->dev);
iommu->dev.parent = parent;
iommu->dev.groups = groups; iommu->dev->class = &iommu_class;
iommu->dev->parent = parent;
iommu->dev->groups = groups;
va_start(vargs, fmt); va_start(vargs, fmt);
ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
va_end(vargs); va_end(vargs);
if (ret) if (ret)
goto error; goto error;
ret = device_add(&iommu->dev); ret = device_add(iommu->dev);
if (ret) if (ret)
goto error; goto error;
dev_set_drvdata(iommu->dev, iommu);
return 0; return 0;
error: error:
put_device(&iommu->dev); put_device(iommu->dev);
return ret; return ret;
} }
void iommu_device_sysfs_remove(struct iommu_device *iommu) void iommu_device_sysfs_remove(struct iommu_device *iommu)
{ {
device_unregister(&iommu->dev); dev_set_drvdata(iommu->dev, NULL);
device_unregister(iommu->dev);
iommu->dev = NULL;
} }
/* /*
* IOMMU drivers can indicate a device is managed by a given IOMMU using * IOMMU drivers can indicate a device is managed by a given IOMMU using
@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
if (!iommu || IS_ERR(iommu)) if (!iommu || IS_ERR(iommu))
return -ENODEV; return -ENODEV;
ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link)); &link->kobj, dev_name(link));
if (ret) if (ret)
return ret; return ret;
ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
if (ret) if (ret)
sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
dev_name(link)); dev_name(link));
return ret; return ret;
@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
return; return;
sysfs_remove_link(&link->kobj, "iommu"); sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
} }

View file

@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0; new_slave->delay = 0;
new_slave->link_failure_count = 0; new_slave->link_failure_count = 0;
if (bond_update_speed_duplex(new_slave)) if (bond_update_speed_duplex(new_slave) &&
bond_needs_speed_duplex(bond))
new_slave->link = BOND_LINK_DOWN; new_slave->link = BOND_LINK_DOWN;
new_slave->last_rx = jiffies - new_slave->last_rx = jiffies -
@ -2137,11 +2138,13 @@ static void bond_miimon_commit(struct bonding *bond)
continue; continue;
case BOND_LINK_UP: case BOND_LINK_UP:
if (bond_update_speed_duplex(slave)) { if (bond_update_speed_duplex(slave) &&
bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN; slave->link = BOND_LINK_DOWN;
netdev_warn(bond->dev, if (net_ratelimit())
"failed to get link speed/duplex for %s\n", netdev_warn(bond->dev,
slave->dev->name); "failed to get link speed/duplex for %s\n",
slave->dev->name);
continue; continue;
} }
bond_set_slave_link_state(slave, BOND_LINK_UP, bond_set_slave_link_state(slave, BOND_LINK_UP,

View file

@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* Virtual PCI function needs to determine UAR page size from /* Virtual PCI function needs to determine UAR page size from
* firmware. Only master PCI function can set the uar page size * firmware. Only master PCI function can set the uar page size
*/ */
if (enable_4k_uar) if (enable_4k_uar || !dev->persist->num_vfs)
dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
else else
dev->uar_page_shift = PAGE_SHIFT; dev->uar_page_shift = PAGE_SHIFT;
@ -2275,7 +2275,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
if (enable_4k_uar) { if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;

View file

@ -881,8 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK; return NETDEV_TX_OK;
err_unmap: err_unmap:
--f; while (--f >= 0) {
while (f >= 0) {
frag = &skb_shinfo(skb)->frags[f]; frag = &skb_shinfo(skb)->frags[f];
dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE); skb_frag_size(frag), DMA_TO_DEVICE);

View file

@ -1876,6 +1876,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err_detach: err_detach:
tun_detach_all(dev); tun_detach_all(dev);
/* register_netdevice() already called tun_free_netdev() */
goto err_free_dev;
err_free_flow: err_free_flow:
tun_flow_uninit(tun); tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security); security_tun_dev_free_security(tun->security);

View file

@ -920,10 +920,8 @@ out1:
ntb_free_mw(nt, i); ntb_free_mw(nt, i);
/* if there's an actual failure, we should just bail */ /* if there's an actual failure, we should just bail */
if (rc < 0) { if (rc < 0)
ntb_link_disable(ndev);
return; return;
}
out: out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1) if (ntb_link_is_up(ndev, NULL, NULL) == 1)

View file

@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */ {} /* Terminating entry */
}; };

View file

@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
{ {
struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev); const char *name = dev_name(&vp_dev->vdev.dev);
unsigned flags = PCI_IRQ_MSIX;
unsigned i, v; unsigned i, v;
int err = -ENOMEM; int err = -ENOMEM;
@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
GFP_KERNEL)) GFP_KERNEL))
goto error; goto error;
if (desc) {
flags |= PCI_IRQ_AFFINITY;
desc->pre_vectors++; /* virtio config vector */
}
err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
nvectors, PCI_IRQ_MSIX | nvectors, flags, desc);
(desc ? PCI_IRQ_AFFINITY : 0),
desc);
if (err < 0) if (err < 0)
goto error; goto error;
vp_dev->msix_enabled = 1; vp_dev->msix_enabled = 1;

View file

@ -194,15 +194,20 @@ cifs_bp_rename_retry:
} }
/* /*
* Don't allow path components longer than the server max.
* Don't allow the separator character in a path component. * Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix. * The VFS will not allow "/", but "\" is allowed by posix.
*/ */
static int static int
check_name(struct dentry *direntry) check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{ {
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i; int i;
if (unlikely(direntry->d_name.len >
tcon->fsAttrInfo.MaxPathNameComponentLength))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) { for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') { if (direntry->d_name.name[i] == '\\') {
@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
return finish_no_open(file, res); return finish_no_open(file, res);
} }
rc = check_name(direntry);
if (rc)
return rc;
xid = get_xid(); xid = get_xid();
cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
} }
tcon = tlink_tcon(tlink); tcon = tlink_tcon(tlink);
rc = check_name(direntry, tcon);
if (rc)
goto out_free_xid;
server = tcon->ses->server; server = tcon->ses->server;
if (server->ops->new_lease_key) if (server->ops->new_lease_key)
@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
} }
pTcon = tlink_tcon(tlink); pTcon = tlink_tcon(tlink);
rc = check_name(direntry); rc = check_name(direntry, pTcon);
if (rc) if (rc)
goto lookup_out; goto lookup_out;

View file

@ -3195,8 +3195,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); kst->f_bfree = kst->f_bavail =
kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return; return;
} }

View file

@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
/*
* Make sure that the faulting address's PMD offset (color) matches
* the PMD offset from the start of the file. This is necessary so
* that a PMD range in the page table overlaps exactly with a PMD
* range in the radix tree.
*/
if ((vmf->pgoff & PG_PMD_COLOUR) !=
((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
goto fallback;
/* Fall back to PTEs if we're going to COW */ /* Fall back to PTEs if we're going to COW */
if (write && !(vma->vm_flags & VM_SHARED)) if (write && !(vma->vm_flags & VM_SHARED))
goto fallback; goto fallback;
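The new check only passes when the file offset and the faulting virtual address sit at the same position within a PMD. Assuming the common 4 KiB page / 2 MiB PMD layout (an assumption, not something this hunk states), PG_PMD_COLOUR is 511 and the test reduces to agreement modulo 512 pages, as this small sketch shows:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define PMD_SHIFT	21	/* assumed 2 MiB PMDs */
#define PG_PMD_COLOUR	((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)	/* 511 */

/* Same test as the new code: file offset and VA must share a colour. */
static bool pmd_colour_ok(unsigned long pgoff, unsigned long address)
{
	return (pgoff & PG_PMD_COLOUR) ==
	       ((address >> PAGE_SHIFT) & PG_PMD_COLOUR);
}

int main(void)
{
	/* file offset 0 mapped at a 2 MiB aligned address: PMD fault allowed */
	printf("%d\n", pmd_colour_ok(0, 0x7f0000200000UL));
	/* file offset shifted by one page: must fall back to PTEs */
	printf("%d\n", pmd_colour_ok(1, 0x7f0000200000UL));
	return 0;
}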

View file

@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]); argp->p = page_address(argp->pagelist[0]);
argp->pagelist++; argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) { if (argp->pagelen < PAGE_SIZE) {
argp->end = argp->p + (argp->pagelen>>2); argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
argp->pagelen = 0; argp->pagelen = 0;
} else { } else {
argp->end = argp->p + (PAGE_SIZE>>2); argp->end = argp->p + (PAGE_SIZE>>2);
@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
argp->pagelen -= pages * PAGE_SIZE; argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE; len -= pages * PAGE_SIZE;
argp->p = (__be32 *)page_address(argp->pagelist[0]); next_decode_page(argp);
argp->pagelist++;
argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
} }
argp->p += XDR_QUADLEN(len); argp->p += XDR_QUADLEN(len);

View file

@ -59,6 +59,22 @@
/* Align . to a 8 byte boundary equals to maximum function alignment. */ /* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8) #define ALIGN_FUNCTION() . = ALIGN(8)
/*
* LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
* generates .data.identifier sections, which need to be pulled in with
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define BSS_MAIN .bss
#endif
/* /*
* Align to a 32 byte boundary equal to the * Align to a 32 byte boundary equal to the
* alignment gcc 4.5 uses for a struct * alignment gcc 4.5 uses for a struct
@ -199,12 +215,9 @@
/* /*
* .data section * .data section
* LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
* .data.identifier which needs to be pulled in with .data, but don't want to
* pull in .data..stuff which has its own requirements. Same for bss.
*/ */
#define DATA_DATA \ #define DATA_DATA \
*(.data .data.[0-9a-zA-Z_]*) \ *(DATA_MAIN) \
*(.ref.data) \ *(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \ *(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \ MEM_KEEP(init.data) \
@ -435,16 +448,17 @@
VMLINUX_SYMBOL(__security_initcall_end) = .; \ VMLINUX_SYMBOL(__security_initcall_end) = .; \
} }
/* .text section. Map to function alignment to avoid address changes /*
* .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map * during second ld run in second ld pass when generating System.map
* LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates *
* .text.identifier which needs to be pulled in with .text , but some * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
* architectures define .text.foo which is not intended to be pulled in here. * code elimination is enabled, so these sections should be converted
* Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have * to use ".." first.
* conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ */
#define TEXT_TEXT \ #define TEXT_TEXT \
ALIGN_FUNCTION(); \ ALIGN_FUNCTION(); \
*(.text.hot .text .text.fixup .text.unlikely) \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.ref.text) \ *(.ref.text) \
MEM_KEEP(init.text) \ MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \ MEM_KEEP(exit.text) \
@ -613,7 +627,7 @@
BSS_FIRST_SECTIONS \ BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \ *(.bss..page_aligned) \
*(.dynbss) \ *(.dynbss) \
*(.bss .bss.[0-9a-zA-Z_]*) \ *(BSS_MAIN) \
*(COMMON) \ *(COMMON) \
} }

View file

@ -43,6 +43,7 @@ struct bpf_reg_state {
u32 min_align; u32 min_align;
u32 aux_off; u32 aux_off;
u32 aux_off_align; u32 aux_off_align;
bool value_from_signed;
}; };
enum bpf_stack_slot_type { enum bpf_stack_slot_type {

View file

@ -891,9 +891,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes /* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */ limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32 #if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) #define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64 #elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) #define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif #endif
#define FL_POSIX 1 #define FL_POSIX 1
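On 32-bit builds the page cache can index at most ULONG_MAX pages, so the old expression undershot the real ceiling by roughly a factor of two. A quick user-space check of both expressions for an assumed 4 KiB-page, 32-bit-long configuration (the arithmetic below is illustrative, not quoted from the patch):

#include <stdint.h>
#include <stdio.h>

/* Assumed 32-bit configuration: 4 KiB pages, 32-bit unsigned long. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BITS_PER_LONG	32

int main(void)
{
	int64_t old_max = ((int64_t)PAGE_SIZE << (BITS_PER_LONG - 1)) - 1;
	int64_t new_max = (int64_t)UINT32_MAX << PAGE_SHIFT;

	/* old: 8 TiB - 1; new: ~16 TiB (ULONG_MAX indexable pages) */
	printf("old MAX_LFS_FILESIZE = %lld\n", (long long)old_max);
	printf("new MAX_LFS_FILESIZE = %lld\n", (long long)new_max);
	return 0;
}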

View file

@ -240,7 +240,7 @@ struct iommu_device {
struct list_head list; struct list_head list;
const struct iommu_ops *ops; const struct iommu_ops *ops;
struct fwnode_handle *fwnode; struct fwnode_handle *fwnode;
struct device dev; struct device *dev;
}; };
int iommu_device_register(struct iommu_device *iommu); int iommu_device_register(struct iommu_device *iommu);
@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode; iommu->fwnode = fwnode;
} }
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return (struct iommu_device *)dev_get_drvdata(dev);
}
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{ {
} }
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
return NULL;
}
static inline void iommu_device_unregister(struct iommu_device *iommu) static inline void iommu_device_unregister(struct iommu_device *iommu)
{ {
} }

View file

@ -371,9 +371,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
__PTR_RING_PEEK_CALL_v; \ __PTR_RING_PEEK_CALL_v; \
}) })
static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{ {
return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); return kcalloc(size, sizeof(void *), gfp);
} }
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@ -462,7 +462,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must * In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so. * disable interrupts/BH when doing so.
*/ */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
unsigned int nrings,
int size, int size,
gfp_t gfp, void (*destroy)(void *)) gfp_t gfp, void (*destroy)(void *))
{ {
@ -470,7 +471,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues; void ***queues;
int i; int i;
queues = kmalloc(nrings * sizeof *queues, gfp); queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues) if (!queues)
goto noqueues; goto noqueues;
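Both ptr_ring hunks replace open-coded size multiplications with the checked helpers: kcalloc() and kmalloc_array() fail cleanly when count * element size would overflow, instead of wrapping and under-allocating. A standalone sketch of the same guard (alloc_array is a hypothetical stand-in; calloc already performs the check, the explicit test only makes it visible):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Overflow-checked array allocation: what kcalloc()/kmalloc_array() add
     * over a bare kmalloc(n * size). */
    static void *alloc_array(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;          /* would wrap: refuse instead of under-allocating */
        return calloc(n, size);   /* zeroed, like kcalloc() */
    }

    int main(void)
    {
        void *ok  = alloc_array(1024, sizeof(void *));
        void *bad = alloc_array(SIZE_MAX / 2, sizeof(void *));   /* rejected */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
    }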

View file

@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
} }
static inline int skb_array_resize_multiple(struct skb_array **rings, static inline int skb_array_resize_multiple(struct skb_array **rings,
int nrings, int size, gfp_t gfp) int nrings, unsigned int size,
gfp_t gfp)
{ {
BUILD_BUG_ON(offsetof(struct skb_array, ring)); BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings, return ptr_ring_resize_multiple((struct ptr_ring **)rings,

View file

@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
BOND_MODE(bond) == BOND_MODE_ALB; BOND_MODE(bond) == BOND_MODE_ALB;
} }
static inline bool bond_needs_speed_duplex(const struct bonding *bond)
{
return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
}
static inline bool bond_is_nondyn_tlb(const struct bonding *bond) static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
{ {
return (BOND_MODE(bond) == BOND_MODE_TLB) && return (BOND_MODE(bond) == BOND_MODE_TLB) &&

View file

@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
!forwarding) !forwarding)
return dst_mtu(dst); return dst_mtu(dst);
return min(dst->dev->mtu, IP_MAX_MTU); return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
} }
static inline unsigned int ip_skb_dst_mtu(struct sock *sk, static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
} }
return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
} }
u32 ip_idents_reserve(u32 hash, int segs); u32 ip_idents_reserve(u32 hash, int segs);
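dst->dev->mtu is read here without any lock and can change concurrently through configuration; READ_ONCE() forces a single load so the min() comparison and the returned value cannot come from two different reads. A small sketch of the idiom, with a simplified READ_ONCE and a made-up path_mtu() helper standing in for the routines above:

    #include <stdio.h>

    /* Simplified READ_ONCE(): force exactly one load of a value that other
     * threads may change. */
    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    static unsigned int dev_mtu = 1500;        /* stands in for dst->dev->mtu */
    #define IP_MAX_MTU 0xFFF0U

    static unsigned int path_mtu(void)
    {
        /* One load: without READ_ONCE() the compiler may reload dev_mtu for
         * the comparison and again for the return, and the two reads can
         * observe different values if the MTU changes in between. */
        unsigned int mtu = READ_ONCE(dev_mtu);

        return mtu < IP_MAX_MTU ? mtu : IP_MAX_MTU;
    }

    int main(void)
    {
        printf("mtu=%u\n", path_mtu());
        return 0;
    }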

View file

@ -785,8 +785,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
old = *pold; old = *pold;
*pold = new; *pold = new;
if (old != NULL) { if (old != NULL) {
qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); unsigned int qlen = old->q.qlen;
unsigned int backlog = old->qstats.backlog;
qdisc_reset(old); qdisc_reset(old);
qdisc_tree_reduce_backlog(old, qlen, backlog);
} }
sch_tree_unlock(sch); sch_tree_unlock(sch);
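The qdisc_replace() hunk saves the old child's qlen and backlog, resets it, and only then calls qdisc_tree_reduce_backlog(): the reset wipes both counters, and notifying the parent after the child is already empty lets a parent's ->qlen_notify() see a zero-length class. A toy model of that capture-reset-notify ordering (struct q and parent_reduce are stand-ins, not kernel APIs):

    #include <stdio.h>

    /* Hypothetical child queue whose totals are mirrored in a parent. */
    struct q { unsigned int qlen, backlog; };

    static void q_reset(struct q *q)
    {
        q->qlen = 0;                      /* like qdisc_reset(): counters wiped */
        q->backlog = 0;
    }

    static void parent_reduce(unsigned int qlen, unsigned int backlog)
    {
        printf("parent told to drop qlen=%u backlog=%u\n", qlen, backlog);
    }

    int main(void)
    {
        struct q old = { .qlen = 7, .backlog = 9000 };

        /* Capture, reset, then notify. The pre-fix order notified the parent
         * while the child still held packets; resetting first without saving
         * the counters would report zeroes instead of the real amounts. */
        unsigned int qlen = old.qlen, backlog = old.backlog;

        q_reset(&old);
        parent_reduce(qlen, backlog);
        return 0;
    }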

View file

@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
{ {
regs[regno].min_value = BPF_REGISTER_MIN_RANGE; regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
regs[regno].max_value = BPF_REGISTER_MAX_RANGE; regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
regs[regno].value_from_signed = false;
regs[regno].min_align = 0; regs[regno].min_align = 0;
} }
@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
return -EACCES; return -EACCES;
} }
static bool is_pointer_value(struct bpf_verifier_env *env, int regno) static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{ {
if (env->allow_ptr_leaks) if (allow_ptr_leaks)
return false; return false;
switch (env->cur_state.regs[regno].type) { switch (reg->type) {
case UNKNOWN_VALUE: case UNKNOWN_VALUE:
case CONST_IMM: case CONST_IMM:
return false; return false;
@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
} }
} }
static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
}
static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
int off, int size, bool strict) int off, int size, bool strict)
{ {
@ -1650,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0; return 0;
} }
static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs;
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
/* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
if (src_reg->imm > 0 && dst_reg->imm) {
switch (opcode) {
case BPF_ADD:
/* dreg += sreg
* where both have zero upper bits. Adding them
* can only result making one more bit non-zero
* in the larger value.
* Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
* 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
*/
dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
dst_reg->imm--;
break;
case BPF_AND:
/* dreg &= sreg
* AND can not extend zero bits, only shrink them
* Ex. 0x00..00ffffff
* & 0x0f..ffffffff
* ----------------
* 0x00..00ffffff
*/
dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
break;
case BPF_OR:
/* dreg |= sreg
* OR can only extend zero bits
* Ex. 0x00..00ffffff
* | 0x0f..ffffffff
* ----------------
* 0x0f..00ffffff
*/
dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
break;
case BPF_SUB:
case BPF_MUL:
case BPF_RSH:
case BPF_LSH:
/* These may be flushed out later */
default:
mark_reg_unknown_value(regs, insn->dst_reg);
}
} else {
mark_reg_unknown_value(regs, insn->dst_reg);
}
dst_reg->type = UNKNOWN_VALUE;
return 0;
}
static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn) struct bpf_insn *insn)
{ {
@ -1659,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
u8 opcode = BPF_OP(insn->code); u8 opcode = BPF_OP(insn->code);
u64 dst_imm = dst_reg->imm; u64 dst_imm = dst_reg->imm;
if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
return evaluate_reg_imm_alu_unknown(env, insn);
/* dst_reg->type == CONST_IMM here. Simulate execution of insns /* dst_reg->type == CONST_IMM here. Simulate execution of insns
* containing ALU ops. Don't care about overflow or negative * containing ALU ops. Don't care about overflow or negative
* values, just add/sub/... them; registers are in u64. * values, just add/sub/... them; registers are in u64.
@ -1763,10 +1832,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
dst_align = dst_reg->min_align; dst_align = dst_reg->min_align;
/* We don't know anything about what was done to this register, mark it /* We don't know anything about what was done to this register, mark it
* as unknown. * as unknown. Also, if both derived bounds came from signed/unsigned
* mixed compares and one side is unbounded, we cannot really do anything
* with them as boundaries cannot be trusted. Thus, arithmetic of two
* regs of such kind will get invalidated bounds on the dst side.
*/ */
if (min_val == BPF_REGISTER_MIN_RANGE && if ((min_val == BPF_REGISTER_MIN_RANGE &&
max_val == BPF_REGISTER_MAX_RANGE) { max_val == BPF_REGISTER_MAX_RANGE) ||
(BPF_SRC(insn->code) == BPF_X &&
((min_val != BPF_REGISTER_MIN_RANGE &&
max_val == BPF_REGISTER_MAX_RANGE) ||
(min_val == BPF_REGISTER_MIN_RANGE &&
max_val != BPF_REGISTER_MAX_RANGE) ||
(dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
(dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
regs[insn->dst_reg].value_from_signed !=
regs[insn->src_reg].value_from_signed)) {
reset_reg_range_values(regs, insn->dst_reg); reset_reg_range_values(regs, insn->dst_reg);
return; return;
} }
@ -1775,10 +1858,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* do our normal operations to the register, we need to set the values * do our normal operations to the register, we need to set the values
* to the min/max since they are undefined. * to the min/max since they are undefined.
*/ */
if (min_val == BPF_REGISTER_MIN_RANGE) if (opcode != BPF_SUB) {
dst_reg->min_value = BPF_REGISTER_MIN_RANGE; if (min_val == BPF_REGISTER_MIN_RANGE)
if (max_val == BPF_REGISTER_MAX_RANGE) dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
dst_reg->max_value = BPF_REGISTER_MAX_RANGE; if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
}
switch (opcode) { switch (opcode) {
case BPF_ADD: case BPF_ADD:
@ -1789,10 +1874,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
dst_reg->min_align = min(src_align, dst_align); dst_reg->min_align = min(src_align, dst_align);
break; break;
case BPF_SUB: case BPF_SUB:
/* If one of our values was at the end of our ranges, then the
* _opposite_ value in the dst_reg goes to the end of our range.
*/
if (min_val == BPF_REGISTER_MIN_RANGE)
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
if (max_val == BPF_REGISTER_MAX_RANGE)
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value -= min_val; dst_reg->min_value -= max_val;
if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value -= max_val; dst_reg->max_value -= min_val;
dst_reg->min_align = min(src_align, dst_align); dst_reg->min_align = min(src_align, dst_align);
break; break;
case BPF_MUL: case BPF_MUL:
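The BPF_SUB fix applies ordinary interval arithmetic: dst - src is smallest when src is at its maximum and largest when src is at its minimum, so the saved bounds have to cross over, and a missing src bound must invalidate the opposite dst bound. A worked check of why the old same-side subtraction was unsound:

    #include <stdio.h>

    int main(void)
    {
        /* dst in [10, 20], src in [3, 5]: bounds of dst - src. */
        long dst_min = 10, dst_max = 20;
        long src_min = 3,  src_max = 5;

        long new_min = dst_min - src_max;   /* 10 - 5 = 5  */
        long new_max = dst_max - src_min;   /* 20 - 3 = 17 */

        /* The pre-fix code subtracted same-side bounds, giving [7, 15] and
         * wrongly excluding reachable results such as 10 - 5 = 5 and
         * 20 - 3 = 17. */
        printf("correct: [%ld, %ld], pre-fix: [%ld, %ld]\n",
               new_min, new_max, dst_min - src_min, dst_max - src_max);
        return 0;
    }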
@ -1953,6 +2045,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
regs[insn->dst_reg].max_value = insn->imm; regs[insn->dst_reg].max_value = insn->imm;
regs[insn->dst_reg].min_value = insn->imm; regs[insn->dst_reg].min_value = insn->imm;
regs[insn->dst_reg].min_align = calc_align(insn->imm); regs[insn->dst_reg].min_align = calc_align(insn->imm);
regs[insn->dst_reg].value_from_signed = false;
} }
} else if (opcode > BPF_END) { } else if (opcode > BPF_END) {
@ -2128,40 +2221,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val, struct bpf_reg_state *false_reg, u64 val,
u8 opcode) u8 opcode)
{ {
bool value_from_signed = true;
bool is_range = true;
switch (opcode) { switch (opcode) {
case BPF_JEQ: case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is /* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure. * true then we know for sure.
*/ */
true_reg->max_value = true_reg->min_value = val; true_reg->max_value = true_reg->min_value = val;
is_range = false;
break; break;
case BPF_JNE: case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false /* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure; * we know the value for sure;
*/ */
false_reg->max_value = false_reg->min_value = val; false_reg->max_value = false_reg->min_value = val;
is_range = false;
break; break;
case BPF_JGT: case BPF_JGT:
/* Unsigned comparison, the minimum value is 0. */ value_from_signed = false;
false_reg->min_value = 0;
/* fallthrough */ /* fallthrough */
case BPF_JSGT: case BPF_JSGT:
if (true_reg->value_from_signed != value_from_signed)
reset_reg_range_values(true_reg, 0);
if (false_reg->value_from_signed != value_from_signed)
reset_reg_range_values(false_reg, 0);
if (opcode == BPF_JGT) {
/* Unsigned comparison, the minimum value is 0. */
false_reg->min_value = 0;
}
/* If this is false then we know the maximum val is val, /* If this is false then we know the maximum val is val,
* otherwise we know the min val is val+1. * otherwise we know the min val is val+1.
*/ */
false_reg->max_value = val; false_reg->max_value = val;
false_reg->value_from_signed = value_from_signed;
true_reg->min_value = val + 1; true_reg->min_value = val + 1;
true_reg->value_from_signed = value_from_signed;
break; break;
case BPF_JGE: case BPF_JGE:
/* Unsigned comparison, the minimum value is 0. */ value_from_signed = false;
false_reg->min_value = 0;
/* fallthrough */ /* fallthrough */
case BPF_JSGE: case BPF_JSGE:
if (true_reg->value_from_signed != value_from_signed)
reset_reg_range_values(true_reg, 0);
if (false_reg->value_from_signed != value_from_signed)
reset_reg_range_values(false_reg, 0);
if (opcode == BPF_JGE) {
/* Unsigned comparison, the minimum value is 0. */
false_reg->min_value = 0;
}
/* If this is false then we know the maximum value is val - 1, /* If this is false then we know the maximum value is val - 1,
* otherwise we know the minimum value is val. * otherwise we know the minimum value is val.
*/ */
false_reg->max_value = val - 1; false_reg->max_value = val - 1;
false_reg->value_from_signed = value_from_signed;
true_reg->min_value = val; true_reg->min_value = val;
true_reg->value_from_signed = value_from_signed;
break; break;
default: default:
break; break;
@ -2169,6 +2285,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
check_reg_overflow(false_reg); check_reg_overflow(false_reg);
check_reg_overflow(true_reg); check_reg_overflow(true_reg);
if (is_range) {
if (__is_pointer_value(false, false_reg))
reset_reg_range_values(false_reg, 0);
if (__is_pointer_value(false, true_reg))
reset_reg_range_values(true_reg, 0);
}
} }
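The value_from_signed bookkeeping added above exists because a bound learned from a signed branch says nothing about the unsigned view of a register, and vice versa: a 64-bit value of -1 satisfies a signed "< 16" test while being 0xffffffffffffffff unsigned. Mixing the two kinds of bound therefore resets the range instead of trusting it. A short demonstration of the mismatch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t  v = -1;              /* passes a signed "v < 16" test       */
        uint64_t u = (uint64_t)v;     /* yet is 0xffffffffffffffff unsigned  */

        if (v < 16)
            printf("signed bound holds: v = %lld\n", (long long)v);
        if (u >= 16)
            printf("unsigned view huge: u = 0x%llx\n", (unsigned long long)u);

        /* A verifier that carried the signed bound over to the unsigned view
         * could conclude u < 16 here; tracking value_from_signed and
         * resetting mixed bounds avoids exactly that. */
        return 0;
    }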
/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
@ -2178,41 +2300,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
struct bpf_reg_state *false_reg, u64 val, struct bpf_reg_state *false_reg, u64 val,
u8 opcode) u8 opcode)
{ {
bool value_from_signed = true;
bool is_range = true;
switch (opcode) { switch (opcode) {
case BPF_JEQ: case BPF_JEQ:
/* If this is false then we know nothing Jon Snow, but if it is /* If this is false then we know nothing Jon Snow, but if it is
* true then we know for sure. * true then we know for sure.
*/ */
true_reg->max_value = true_reg->min_value = val; true_reg->max_value = true_reg->min_value = val;
is_range = false;
break; break;
case BPF_JNE: case BPF_JNE:
/* If this is true we know nothing Jon Snow, but if it is false /* If this is true we know nothing Jon Snow, but if it is false
* we know the value for sure; * we know the value for sure;
*/ */
false_reg->max_value = false_reg->min_value = val; false_reg->max_value = false_reg->min_value = val;
is_range = false;
break; break;
case BPF_JGT: case BPF_JGT:
/* Unsigned comparison, the minimum value is 0. */ value_from_signed = false;
true_reg->min_value = 0;
/* fallthrough */ /* fallthrough */
case BPF_JSGT: case BPF_JSGT:
if (true_reg->value_from_signed != value_from_signed)
reset_reg_range_values(true_reg, 0);
if (false_reg->value_from_signed != value_from_signed)
reset_reg_range_values(false_reg, 0);
if (opcode == BPF_JGT) {
/* Unsigned comparison, the minimum value is 0. */
true_reg->min_value = 0;
}
/* /*
* If this is false, then the val is <= the register, if it is * If this is false, then the val is <= the register, if it is
* true the register <= to the val. * true the register <= to the val.
*/ */
false_reg->min_value = val; false_reg->min_value = val;
false_reg->value_from_signed = value_from_signed;
true_reg->max_value = val - 1; true_reg->max_value = val - 1;
true_reg->value_from_signed = value_from_signed;
break; break;
case BPF_JGE: case BPF_JGE:
/* Unsigned comparison, the minimum value is 0. */ value_from_signed = false;
true_reg->min_value = 0;
/* fallthrough */ /* fallthrough */
case BPF_JSGE: case BPF_JSGE:
if (true_reg->value_from_signed != value_from_signed)
reset_reg_range_values(true_reg, 0);
if (false_reg->value_from_signed != value_from_signed)
reset_reg_range_values(false_reg, 0);
if (opcode == BPF_JGE) {
/* Unsigned comparison, the minimum value is 0. */
true_reg->min_value = 0;
}
/* If this is false then constant < register, if it is true then /* If this is false then constant < register, if it is true then
* the register < constant. * the register < constant.
*/ */
false_reg->min_value = val + 1; false_reg->min_value = val + 1;
false_reg->value_from_signed = value_from_signed;
true_reg->max_value = val; true_reg->max_value = val;
true_reg->value_from_signed = value_from_signed;
break; break;
default: default:
break; break;
@ -2220,6 +2365,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
check_reg_overflow(false_reg); check_reg_overflow(false_reg);
check_reg_overflow(true_reg); check_reg_overflow(true_reg);
if (is_range) {
if (__is_pointer_value(false, false_reg))
reset_reg_range_values(false_reg, 0);
if (__is_pointer_value(false, true_reg))
reset_reg_range_values(true_reg, 0);
}
} }
static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,

View file

@ -9996,28 +9996,27 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context; goto err_context;
/* /*
* Do not allow to attach to a group in a different * Make sure we're both events for the same CPU;
* task or CPU context: * grouping events for different CPUs is broken; since
* you can never concurrently schedule them anyhow.
*/ */
if (move_group) { if (group_leader->cpu != event->cpu)
/* goto err_context;
* Make sure we're both on the same task, or both
* per-cpu events.
*/
if (group_leader->ctx->task != ctx->task)
goto err_context;
/* /*
* Make sure we're both events for the same CPU; * Make sure we're both on the same task, or both
* grouping events for different CPUs is broken; since * per-CPU events.
* you can never concurrently schedule them anyhow. */
*/ if (group_leader->ctx->task != ctx->task)
if (group_leader->cpu != event->cpu) goto err_context;
goto err_context;
} else { /*
if (group_leader->ctx != ctx) * Do not allow to attach to a group in a different task
goto err_context; * or CPU context. If we're moving SW events, we'll fix
} * this up later, so allow that.
*/
if (!move_group && group_leader->ctx != ctx)
goto err_context;
/* /*
* Only a group leader can be exclusive or pinned * Only a group leader can be exclusive or pinned

View file

@ -802,6 +802,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm_init_cpumask(mm); mm_init_cpumask(mm);
mm_init_aio(mm); mm_init_aio(mm);
mm_init_owner(mm, p); mm_init_owner(mm, p);
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_mm_init(mm); mmu_notifier_mm_init(mm);
clear_tlb_flush_pending(mm); clear_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS

View file

@ -203,6 +203,7 @@ struct timer_base {
bool migration_enabled; bool migration_enabled;
bool nohz_active; bool nohz_active;
bool is_idle; bool is_idle;
bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE); DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE]; struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned; } ____cacheline_aligned;
@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base) static inline void forward_timer_base(struct timer_base *base)
{ {
unsigned long jnow = READ_ONCE(jiffies); unsigned long jnow;
/* /*
* We only forward the base when it's idle and we have a delta between * We only forward the base when we are idle or have just come out of
* base clock and jiffies. * idle (must_forward_clk logic), and have a delta between base clock
* and jiffies. In the common case, run_timers will take care of it.
*/ */
if (!base->is_idle || (long) (jnow - base->clk) < 2) if (likely(!base->must_forward_clk))
return;
jnow = READ_ONCE(jiffies);
base->must_forward_clk = base->is_idle;
if ((long)(jnow - base->clk) < 2)
return; return;
/* /*
@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* same array bucket then just return: * same array bucket then just return:
*/ */
if (timer_pending(timer)) { if (timer_pending(timer)) {
/*
* The downside of this optimization is that it can result in
* larger granularity than you would get from adding a new
* timer with this expiry.
*/
if (timer->expires == expires) if (timer->expires == expires)
return 1; return 1;
@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
* dequeue/enqueue dance. * dequeue/enqueue dance.
*/ */
base = lock_timer_base(timer, &flags); base = lock_timer_base(timer, &flags);
forward_timer_base(base);
clk = base->clk; clk = base->clk;
idx = calc_wheel_index(expires, clk); idx = calc_wheel_index(expires, clk);
@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
} }
} else { } else {
base = lock_timer_base(timer, &flags); base = lock_timer_base(timer, &flags);
forward_timer_base(base);
} }
ret = detach_if_pending(timer, base, false); ret = detach_if_pending(timer, base, false);
@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
spin_lock(&base->lock); spin_lock(&base->lock);
WRITE_ONCE(timer->flags, WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu); (timer->flags & ~TIMER_BASEMASK) | base->cpu);
forward_timer_base(base);
} }
} }
/* Try to forward a stale timer base clock */
forward_timer_base(base);
timer->expires = expires; timer->expires = expires;
/* /*
* If 'idx' was calculated above and the base time did not advance * If 'idx' was calculated above and the base time did not advance
@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
WRITE_ONCE(timer->flags, WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | cpu); (timer->flags & ~TIMER_BASEMASK) | cpu);
} }
forward_timer_base(base);
debug_activate(timer, timer->expires); debug_activate(timer, timer->expires);
internal_add_timer(base, timer); internal_add_timer(base, timer);
@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (!is_max_delta) if (!is_max_delta)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC; expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/* /*
* If we expect to sleep more than a tick, mark the base idle: * If we expect to sleep more than a tick, mark the base idle.
* Also the tick is stopped so any added timer must forward
* the base clk itself to keep granularity small. This idle
* logic is only maintained for the BASE_STD base, deferrable
* timers may still see large granularity skew (by design).
*/ */
if ((expires - basem) > TICK_NSEC) if ((expires - basem) > TICK_NSEC) {
base->must_forward_clk = true;
base->is_idle = true; base->is_idle = true;
}
} }
spin_unlock(&base->lock); spin_unlock(&base->lock);
@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{ {
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
/*
* must_forward_clk must be cleared before running timers so that any
* timer functions that call mod_timer will not try to forward the
* base. Idle tracking / clock forwarding logic is only used with
* BASE_STD timers.
*
* The deferrable base does not do idle tracking at all, so we do
* not forward it. This can result in very large variations in
* granularity for deferrable timers, but they can be deferred for
* long periods due to idle.
*/
base->must_forward_clk = false;
__run_timers(base); __run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
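Taken together, the timer hunks replace the old "forward whenever idle" heuristic with an explicit must_forward_clk flag: get_next_timer_interrupt() sets it when it marks the base idle, the next __mod_timer()/add_timer_on() consumes it (and keeps it armed while the base stays idle), and run_timer_softirq() clears it before running callbacks so timers that re-arm themselves do not forward a base that is being processed. A toy model of just that rule (struct base and forward() are stand-ins; the real code also clamps the clock to the next expiry):

    #include <stdio.h>
    #include <stdbool.h>

    struct base { unsigned long clk; bool is_idle; bool must_forward_clk; };

    static void forward(struct base *b, unsigned long jiffies)
    {
        if (!b->must_forward_clk)             /* only after an idle period    */
            return;
        b->must_forward_clk = b->is_idle;     /* stays armed while still idle */
        if ((long)(jiffies - b->clk) < 2)     /* nothing worth forwarding     */
            return;
        b->clk = jiffies;
    }

    int main(void)
    {
        struct base b = { .clk = 100, .is_idle = true, .must_forward_clk = true };

        forward(&b, 150);                     /* armed: clk catches up         */
        printf("clk after idle wakeup:   %lu\n", b.clk);

        b.must_forward_clk = false;           /* cleared before __run_timers() */
        forward(&b, 200);                     /* ignored: callbacks must not
                                                 forward a base being run      */
        printf("clk while running timers: %lu\n", b.clk);
        return 0;
    }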

View file

@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
fmt_cnt++; fmt_cnt++;
} }
return __trace_printk(1/* fake ip will not be printed */, fmt, /* Horrid workaround for getting va_list handling working with different
mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1, * argument type combinations generically for 32 and 64 bit archs.
mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2, */
mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3); #define __BPF_TP_EMIT() __BPF_ARG3_TP()
#define __BPF_TP(...) \
__trace_printk(1 /* Fake ip will not be printed. */, \
fmt, ##__VA_ARGS__)
#define __BPF_ARG1_TP(...) \
((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
? __BPF_TP(arg1, ##__VA_ARGS__) \
: ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
? __BPF_TP((long)arg1, ##__VA_ARGS__) \
: __BPF_TP((u32)arg1, ##__VA_ARGS__)))
#define __BPF_ARG2_TP(...) \
((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
: ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
: __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
#define __BPF_ARG3_TP(...) \
((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
: ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
: __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
return __BPF_TP_EMIT();
} }
static const struct bpf_func_proto bpf_trace_printk_proto = { static const struct bpf_func_proto bpf_trace_printk_proto = {
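The macro chain above builds the __trace_printk() call outward from arg3 to arg1, choosing a separate call site for every combination of mod[] and word size so each argument is passed with exactly the width its format specifier expects. The old single expression could not do that: a conditional expression has one result type, so on a 32-bit ABI a value meant to be consumed as a 32-bit int could still be pushed as a u64 and shift every following vararg. A userspace illustration of that type-widening effect (harmless on a 64-bit host, shown only for the shape of the problem):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t arg1 = 42;     /* the helper always receives u64 slots     */
        int      mod0 = 0;      /* the format string expects a 32-bit value */

        /* Old style: one expression for every case. Its result type is u64
         * no matter what mod0 is, so on a 32-bit ABI a "%d" conversion would
         * consume only half of an 8-byte vararg slot and every following
         * argument would shift. (Only the type width is shown here.) */
        printf("old-style argument width: %zu bytes\n",
               sizeof(mod0 == 2 ? arg1 : mod0 == 1 ? (long)arg1 : (uint32_t)arg1));

        /* New style: a distinct call per case, so the argument is passed with
         * exactly the width the format specifier will read. */
        if (mod0 == 0)
            printf("new-style value: %u\n", (uint32_t)arg1);
        return 0;
    }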

View file

@ -878,6 +878,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
function_profile_call(trace->func, 0, NULL, NULL); function_profile_call(trace->func, 0, NULL, NULL);
/* If function graph is shutting down, ret_stack can be NULL */
if (!current->ret_stack)
return 0;
if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
current->ret_stack[index].subtime = 0; current->ret_stack[index].subtime = 0;

View file

@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
* the page that was allocated, with the read page of the buffer. * the page that was allocated, with the read page of the buffer.
* *
* Returns: * Returns:
* The page allocated, or NULL on error. * The page allocated, or ERR_PTR
*/ */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{ {
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_data_page *bpage = NULL; struct buffer_data_page *bpage = NULL;
unsigned long flags; unsigned long flags;
struct page *page; struct page *page;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return ERR_PTR(-ENODEV);
cpu_buffer = buffer->buffers[cpu];
local_irq_save(flags); local_irq_save(flags);
arch_spin_lock(&cpu_buffer->lock); arch_spin_lock(&cpu_buffer->lock);
@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
page = alloc_pages_node(cpu_to_node(cpu), page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_NORETRY, 0); GFP_KERNEL | __GFP_NORETRY, 0);
if (!page) if (!page)
return NULL; return ERR_PTR(-ENOMEM);
bpage = page_address(page); bpage = page_address(page);
@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
* *
* for example: * for example:
* rpage = ring_buffer_alloc_read_page(buffer, cpu); * rpage = ring_buffer_alloc_read_page(buffer, cpu);
* if (!rpage) * if (IS_ERR(rpage))
* return error; * return PTR_ERR(rpage);
* ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
* if (ret >= 0) * if (ret >= 0)
* process_page(rpage, ret); * process_page(rpage, ret);
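Switching ring_buffer_alloc_read_page() from NULL to ERR_PTR(-ENODEV)/ERR_PTR(-ENOMEM) lets callers tell a missing CPU apart from an allocation failure, and the following hunks convert every caller to IS_ERR()/PTR_ERR(). A self-contained sketch of the convention, which encodes small negative errno values in the top, never-mapped range of pointer values (the helpers below are userspace re-creations, not the kernel's):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    static void *ERR_PTR(long error)      { return (void *)error; }
    static long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static int   IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static char fake_page[4096];          /* stands in for a real buffer page */

    static void *alloc_read_page(int cpu_online, int have_memory)
    {
        if (!cpu_online)
            return ERR_PTR(-ENODEV);      /* caller can tell *why* it failed */
        if (!have_memory)
            return ERR_PTR(-ENOMEM);
        return fake_page;
    }

    int main(void)
    {
        void *p = alloc_read_page(0, 1);

        if (IS_ERR(p))
            printf("failed: %ld\n", PTR_ERR(p));   /* -ENODEV */
        else
            printf("got page %p\n", p);
        return 0;
    }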

View file

@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
int i; int i;
bpage = ring_buffer_alloc_read_page(buffer, cpu); bpage = ring_buffer_alloc_read_page(buffer, cpu);
if (!bpage) if (IS_ERR(bpage))
return EVENT_DROPPED; return EVENT_DROPPED;
ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1); ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);

View file

@ -6403,7 +6403,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
{ {
struct ftrace_buffer_info *info = filp->private_data; struct ftrace_buffer_info *info = filp->private_data;
struct trace_iterator *iter = &info->iter; struct trace_iterator *iter = &info->iter;
ssize_t ret; ssize_t ret = 0;
ssize_t size; ssize_t size;
if (!count) if (!count)
@ -6417,10 +6417,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
if (!info->spare) { if (!info->spare) {
info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
iter->cpu_file); iter->cpu_file);
info->spare_cpu = iter->cpu_file; if (IS_ERR(info->spare)) {
ret = PTR_ERR(info->spare);
info->spare = NULL;
} else {
info->spare_cpu = iter->cpu_file;
}
} }
if (!info->spare) if (!info->spare)
return -ENOMEM; return ret;
/* Do we have previous read data to read? */ /* Do we have previous read data to read? */
if (info->read < PAGE_SIZE) if (info->read < PAGE_SIZE)
@ -6595,8 +6600,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
ref->ref = 1; ref->ref = 1;
ref->buffer = iter->trace_buffer->buffer; ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (!ref->page) { if (IS_ERR(ref->page)) {
ret = -ENOMEM; ret = PTR_ERR(ref->page);
ref->page = NULL;
kfree(ref); kfree(ref);
break; break;
} }
@ -8110,6 +8116,7 @@ __init static int tracer_alloc_buffers(void)
if (ret < 0) if (ret < 0)
goto out_free_cpumask; goto out_free_cpumask;
/* Used for event triggers */ /* Used for event triggers */
ret = -ENOMEM;
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
if (!temp_buffer) if (!temp_buffer)
goto out_rm_hp_state; goto out_rm_hp_state;
@ -8224,4 +8231,4 @@ __init static int clear_boot_tracer(void)
} }
fs_initcall(tracer_init_tracefs); fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer); late_initcall_sync(clear_boot_tracer);

View file

@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
if (err && set_str) if (err && set_str)
append_filter_err(ps, filter); append_filter_err(ps, filter);
} }
if (err && !set_str) {
free_event_filter(filter);
filter = NULL;
}
create_filter_finish(ps); create_filter_finish(ps);
*filterp = filter; *filterp = filter;

View file

@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
if (!a) if (!a)
return; return;
if (!a->pages) { if (!a->pages)
kfree(a); goto free;
return;
}
for (i = 0; i < a->n_pages; i++) { for (i = 0; i < a->n_pages; i++) {
if (!a->pages[i]) if (!a->pages[i])
break; break;
free_page((unsigned long)a->pages[i]); free_page((unsigned long)a->pages[i]);
} }
kfree(a->pages);
free:
kfree(a);
} }
struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,

View file

@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
pte_offset_map_lock(mm, pmd, addr, &ptl); pte_offset_map_lock(mm, pmd, addr, &ptl);
goto out; goto out;
} }
put_page(page);
unlock_page(page); unlock_page(page);
put_page(page);
pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--; pte--;
addr -= PAGE_SIZE; addr -= PAGE_SIZE;

View file

@ -302,7 +302,7 @@ void __init memblock_discard(void)
__memblock_free_late(addr, size); __memblock_free_late(addr, size);
} }
if (memblock.memory.regions == memblock_memory_init_regions) { if (memblock.memory.regions != memblock_memory_init_regions) {
addr = __pa(memblock.memory.regions); addr = __pa(memblock.memory.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) * size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.memory.max); memblock.memory.max);
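The one-character memblock fix restores the intended guard: memblock_discard() must free the regions array only when it has been reallocated away from the static memblock_memory_init_regions bootstrap storage; with == the test was inverted, so the static array was handed to the free path while the real allocation leaked. A sketch of the intended check with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical model: a table that starts out pointing at static
     * bootstrap storage and may later be moved to a heap allocation. */
    static int init_regions[4];

    struct table { int *regions; size_t max; };

    static void discard(struct table *t)
    {
        /* Free only if the array was reallocated away from the static
         * bootstrap storage: the "!=" the fix restores. */
        if (t->regions != init_regions)
            free(t->regions);
    }

    int main(void)
    {
        struct table t = { .regions = init_regions, .max = 4 };

        t.regions = malloc(8 * sizeof(int));   /* grown at runtime */
        t.max = 8;
        discard(&t);                           /* heap copy correctly freed */
        printf("done\n");
        return 0;
    }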

View file

@ -66,6 +66,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/memcontrol.h> #include <linux/memcontrol.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/nmi.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
@ -2495,9 +2496,14 @@ void drain_all_pages(struct zone *zone)
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
/*
* Touch the watchdog for every WD_PAGE_COUNT pages.
*/
#define WD_PAGE_COUNT (128*1024)
void mark_free_pages(struct zone *zone) void mark_free_pages(struct zone *zone)
{ {
unsigned long pfn, max_zone_pfn; unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
unsigned long flags; unsigned long flags;
unsigned int order, t; unsigned int order, t;
struct page *page; struct page *page;
@ -2512,6 +2518,11 @@ void mark_free_pages(struct zone *zone)
if (pfn_valid(pfn)) { if (pfn_valid(pfn)) {
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
if (!--page_count) {
touch_nmi_watchdog();
page_count = WD_PAGE_COUNT;
}
if (page_zone(page) != zone) if (page_zone(page) != zone)
continue; continue;
@ -2525,8 +2536,13 @@ void mark_free_pages(struct zone *zone)
unsigned long i; unsigned long i;
pfn = page_to_pfn(page); pfn = page_to_pfn(page);
for (i = 0; i < (1UL << order); i++) for (i = 0; i < (1UL << order); i++) {
if (!--page_count) {
touch_nmi_watchdog();
page_count = WD_PAGE_COUNT;
}
swsusp_set_page_free(pfn_to_page(pfn + i)); swsusp_set_page_free(pfn_to_page(pfn + i));
}
} }
} }
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
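mark_free_pages() walks every pfn of a zone with the zone lock held and interrupts disabled while preparing a hibernation image, which on machines with very large zones can run long enough to trigger watchdog complaints; the hunk simply touches the NMI watchdog every WD_PAGE_COUNT (128K) pages in both loops. The same budget pattern in a standalone sketch (touch_watchdog stands in for touch_nmi_watchdog):

    #include <stdio.h>

    #define WD_PAGE_COUNT (128 * 1024)

    /* Stand-in for touch_nmi_watchdog(); here it only reports progress. */
    static void touch_watchdog(unsigned long done)
    {
        printf("watchdog touched after %lu pages\n", done);
    }

    int main(void)
    {
        unsigned long total = 500UL * 1000, budget = WD_PAGE_COUNT;

        for (unsigned long pfn = 0; pfn < total; pfn++) {
            /* ... per-page work ... */
            if (!--budget) {                   /* same shape as the hunk */
                touch_watchdog(pfn + 1);
                budget = WD_PAGE_COUNT;
            }
        }
        return 0;
    }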

View file

@ -3964,7 +3964,7 @@ int __init shmem_init(void)
} }
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
else else
shmem_huge = 0; /* just in case it was patched */ shmem_huge = 0; /* just in case it was patched */
@ -4025,7 +4025,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
return -EINVAL; return -EINVAL;
shmem_huge = huge; shmem_huge = huge;
if (shmem_huge < SHMEM_HUGE_DENY) if (shmem_huge > SHMEM_HUGE_DENY)
SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
return count; return count;
} }

View file

@ -484,16 +484,16 @@ static int bnep_session(void *arg)
struct net_device *dev = s->dev; struct net_device *dev = s->dev;
struct sock *sk = s->sock->sk; struct sock *sk = s->sock->sk;
struct sk_buff *skb; struct sk_buff *skb;
wait_queue_t wait; DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG(""); BT_DBG("");
set_user_nice(current, -15); set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait); add_wait_queue(sk_sleep(sk), &wait);
while (1) { while (1) {
set_current_state(TASK_INTERRUPTIBLE); /* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&s->terminate)) if (atomic_read(&s->terminate))
break; break;
@ -515,9 +515,8 @@ static int bnep_session(void *arg)
break; break;
netif_wake_queue(dev); netif_wake_queue(dev);
schedule(); wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
} }
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait); remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */ /* Cleanup session */
@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
s = __bnep_get_session(req->dst); s = __bnep_get_session(req->dst);
if (s) { if (s) {
atomic_inc(&s->terminate); atomic_inc(&s->terminate);
wake_up_process(s->task); wake_up_interruptible(sk_sleep(s->sock->sk));
} else } else
err = -ENOENT; err = -ENOENT;
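The bnep change (and the cmtp/hidp changes below) drops the open-coded init_waitqueue_entry()/set_current_state()/schedule() loop in favour of wait_woken(), which records wakeups in the wait entry so the terminate flag can be rechecked without losing one, and the terminators now wake the socket's wait queue instead of poking the session task directly. A userspace analogue of the underlying discipline, check the flag and sleep without a window for a lost wakeup, using a condvar (a sketch of the idea, not the kernel mechanism; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* The waker sets the flag and signals under the same lock the sleeper
     * checks it under, so a wakeup issued between "check" and "sleep" cannot
     * be lost; wait_woken()/woken_wake_function() give a comparable guarantee
     * in the kernel by recording the wakeup in the wait entry itself. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool            terminate;

    static void *session(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!terminate)                    /* flag rechecked before sleeping */
            pthread_cond_wait(&cond, &lock);  /* atomically unlocks and sleeps  */
        pthread_mutex_unlock(&lock);
        printf("session thread exiting\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, session, NULL);

        pthread_mutex_lock(&lock);
        terminate = true;                     /* like atomic_inc(&s->terminate) */
        pthread_cond_signal(&cond);           /* like wake_up_interruptible()   */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
    }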

View file

@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
struct cmtp_session *session = arg; struct cmtp_session *session = arg;
struct sock *sk = session->sock->sk; struct sock *sk = session->sock->sk;
struct sk_buff *skb; struct sk_buff *skb;
wait_queue_t wait; DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("session %p", session); BT_DBG("session %p", session);
set_user_nice(current, -15); set_user_nice(current, -15);
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait); add_wait_queue(sk_sleep(sk), &wait);
while (1) { while (1) {
set_current_state(TASK_INTERRUPTIBLE); /* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&session->terminate)) if (atomic_read(&session->terminate))
break; break;
@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
cmtp_process_transmit(session); cmtp_process_transmit(session);
schedule(); wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
} }
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait); remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem); down_write(&cmtp_session_sem);
@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
err = cmtp_attach_device(session); err = cmtp_attach_device(session);
if (err < 0) { if (err < 0) {
atomic_inc(&session->terminate); atomic_inc(&session->terminate);
wake_up_process(session->task); wake_up_interruptible(sk_sleep(session->sock->sk));
up_write(&cmtp_session_sem); up_write(&cmtp_session_sem);
return err; return err;
} }
@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
/* Stop session thread */ /* Stop session thread */
atomic_inc(&session->terminate); atomic_inc(&session->terminate);
wake_up_process(session->task);
/* Ensure session->terminate is updated */
smp_mb__after_atomic();
wake_up_interruptible(sk_sleep(session->sock->sk));
} else } else
err = -ENOENT; err = -ENOENT;

View file

@ -36,6 +36,7 @@
#define VERSION "1.2" #define VERSION "1.2"
static DECLARE_RWSEM(hidp_session_sem); static DECLARE_RWSEM(hidp_session_sem);
static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
static LIST_HEAD(hidp_session_list); static LIST_HEAD(hidp_session_list);
static unsigned char hidp_keycode[256] = { static unsigned char hidp_keycode[256] = {
@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
* Wake up session thread and notify it to stop. This is asynchronous and * Wake up session thread and notify it to stop. This is asynchronous and
* returns immediately. Call this whenever a runtime error occurs and you want * returns immediately. Call this whenever a runtime error occurs and you want
* the session to stop. * the session to stop.
* Note: wake_up_process() performs any necessary memory-barriers for us. * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
*/ */
static void hidp_session_terminate(struct hidp_session *session) static void hidp_session_terminate(struct hidp_session *session)
{ {
atomic_inc(&session->terminate); atomic_inc(&session->terminate);
wake_up_process(session->task); wake_up_interruptible(&hidp_session_wq);
} }
/* /*
@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk; struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb; struct sk_buff *skb;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
add_wait_queue(&hidp_session_wq, &wait);
for (;;) { for (;;) {
/* /*
* This thread can be woken up two ways: * This thread can be woken up two ways:
@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
* session->terminate flag and wakes this thread up. * session->terminate flag and wakes this thread up.
* - Via modifying the socket state of ctrl/intr_sock. This * - Via modifying the socket state of ctrl/intr_sock. This
* thread is woken up by ->sk_state_changed(). * thread is woken up by ->sk_state_changed().
*
* Note: set_current_state() performs any necessary
* memory-barriers for us.
*/ */
set_current_state(TASK_INTERRUPTIBLE);
/* Ensure session->terminate is updated */
smp_mb__before_atomic();
if (atomic_read(&session->terminate)) if (atomic_read(&session->terminate))
break; break;
@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
hidp_process_transmit(session, &session->ctrl_transmit, hidp_process_transmit(session, &session->ctrl_transmit,
session->ctrl_sock); session->ctrl_sock);
schedule(); wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
} }
remove_wait_queue(&hidp_session_wq, &wait);
atomic_inc(&session->terminate); atomic_inc(&session->terminate);
set_current_state(TASK_RUNNING);
/* Ensure session->terminate is updated */
smp_mb__after_atomic();
}
static int hidp_session_wake_function(wait_queue_t *wait,
unsigned int mode,
int sync, void *key)
{
wake_up_interruptible(&hidp_session_wq);
return false;
} }
/* /*
@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
static int hidp_session_thread(void *arg) static int hidp_session_thread(void *arg)
{ {
struct hidp_session *session = arg; struct hidp_session *session = arg;
wait_queue_t ctrl_wait, intr_wait; DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
BT_DBG("session %p", session); BT_DBG("session %p", session);
@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
set_user_nice(current, -15); set_user_nice(current, -15);
hidp_set_timer(session); hidp_set_timer(session);
init_waitqueue_entry(&ctrl_wait, current);
init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
/* This memory barrier is paired with wq_has_sleeper(). See /* This memory barrier is paired with wq_has_sleeper(). See

View file

@ -24,6 +24,7 @@
#include <net/checksum.h> #include <net/checksum.h>
#include <net/inet_sock.h> #include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/xfrm.h> #include <net/xfrm.h>
@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
EXPORT_SYMBOL_GPL(dccp_packet_name); EXPORT_SYMBOL_GPL(dccp_packet_name);
static void dccp_sk_destruct(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_tx_ccid = NULL;
inet_sock_destruct(sk);
}
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{ {
struct dccp_sock *dp = dccp_sk(sk); struct dccp_sock *dp = dccp_sk(sk);
@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
icsk->icsk_syn_retries = sysctl_dccp_request_retries; icsk->icsk_syn_retries = sysctl_dccp_request_retries;
sk->sk_state = DCCP_CLOSED; sk->sk_state = DCCP_CLOSED;
sk->sk_write_space = dccp_write_space; sk->sk_write_space = dccp_write_space;
sk->sk_destruct = dccp_sk_destruct;
icsk->icsk_sync_mss = dccp_sync_mss; icsk->icsk_sync_mss = dccp_sync_mss;
dp->dccps_mss_cache = 536; dp->dccps_mss_cache = 536;
dp->dccps_rate_last = jiffies; dp->dccps_rate_last = jiffies;
@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
{ {
struct dccp_sock *dp = dccp_sk(sk); struct dccp_sock *dp = dccp_sk(sk);
/* __skb_queue_purge(&sk->sk_write_queue);
* DCCP doesn't use sk_write_queue, just sk_send_head
* for retransmissions
*/
if (sk->sk_send_head != NULL) { if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head); kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL; sk->sk_send_head = NULL;
@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
dp->dccps_hc_rx_ackvec = NULL; dp->dccps_hc_rx_ackvec = NULL;
} }
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_rx_ccid = NULL;
dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
/* clean up feature negotiation state */ /* clean up feature negotiation state */
dccp_feat_list_purge(&dp->dccps_featneg); dccp_feat_list_purge(&dp->dccps_featneg);

View file

@ -1033,15 +1033,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi) if (!fi)
goto failure; goto failure;
fib_info_cnt++;
if (cfg->fc_mx) { if (cfg->fc_mx) {
fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
if (!fi->fib_metrics) if (unlikely(!fi->fib_metrics)) {
goto failure; kfree(fi);
return ERR_PTR(err);
}
atomic_set(&fi->fib_metrics->refcnt, 1); atomic_set(&fi->fib_metrics->refcnt, 1);
} else } else {
fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
}
fib_info_cnt++;
fi->fib_net = net; fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol; fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope; fi->fib_scope = cfg->fc_scope;

View file

@ -1007,10 +1007,18 @@ int igmp_rcv(struct sk_buff *skb)
{ {
/* This basically follows the spec line by line -- see RFC1112 */ /* This basically follows the spec line by line -- see RFC1112 */
struct igmphdr *ih; struct igmphdr *ih;
struct in_device *in_dev = __in_dev_get_rcu(skb->dev); struct net_device *dev = skb->dev;
struct in_device *in_dev;
int len = skb->len; int len = skb->len;
bool dropped = true; bool dropped = true;
if (netif_is_l3_master(dev)) {
dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
if (!dev)
goto drop;
}
in_dev = __in_dev_get_rcu(dev);
if (!in_dev) if (!in_dev)
goto drop; goto drop;

View file

@ -1268,7 +1268,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
if (mtu) if (mtu)
return mtu; return mtu;
mtu = dst->dev->mtu; mtu = READ_ONCE(dst->dev->mtu);
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
if (rt->rt_uses_gateway && mtu > 576) if (rt->rt_uses_gateway && mtu > 576)

View file

@ -3007,8 +3007,7 @@ void tcp_rearm_rto(struct sock *sk)
/* delta may not be positive if the socket is locked /* delta may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled. * when the retrans timer fires and is rescheduled.
*/ */
if (delta > 0) rto = max(delta, 1);
rto = delta;
} }
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
TCP_RTO_MAX); TCP_RTO_MAX);

View file

@ -912,6 +912,8 @@ add:
} }
nsiblings = iter->rt6i_nsiblings; nsiblings = iter->rt6i_nsiblings;
fib6_purge_rt(iter, fn, info->nl_net); fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
rt6_release(iter); rt6_release(iter);
if (nsiblings) { if (nsiblings) {
@ -924,6 +926,8 @@ add:
if (rt6_qualify_for_ecmp(iter)) { if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next; *ins = iter->dst.rt6_next;
fib6_purge_rt(iter, fn, info->nl_net); fib6_purge_rt(iter, fn, info->nl_net);
if (fn->rr_ptr == iter)
fn->rr_ptr = NULL;
rt6_release(iter); rt6_release(iter);
nsiblings--; nsiblings--;
} else { } else {
@ -1012,7 +1016,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
/* Create subtree root node */ /* Create subtree root node */
sfn = node_alloc(); sfn = node_alloc();
if (!sfn) if (!sfn)
goto st_failure; goto failure;
sfn->leaf = info->nl_net->ipv6.ip6_null_entry; sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
@ -1028,12 +1032,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
if (IS_ERR(sn)) { if (IS_ERR(sn)) {
/* If it is failed, discard just allocated /* If it is failed, discard just allocated
root, and then (in st_failure) stale node root, and then (in failure) stale node
in main tree. in main tree.
*/ */
node_free(sfn); node_free(sfn);
err = PTR_ERR(sn); err = PTR_ERR(sn);
goto st_failure; goto failure;
} }
/* Now link new subtree to main tree */ /* Now link new subtree to main tree */
@ -1047,7 +1051,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
if (IS_ERR(sn)) { if (IS_ERR(sn)) {
err = PTR_ERR(sn); err = PTR_ERR(sn);
goto st_failure; goto failure;
} }
} }
@ -1089,22 +1093,22 @@ out:
atomic_inc(&pn->leaf->rt6i_ref); atomic_inc(&pn->leaf->rt6i_ref);
} }
#endif #endif
if (!(rt->dst.flags & DST_NOCACHE)) goto failure;
dst_free(&rt->dst);
} }
return err; return err;
#ifdef CONFIG_IPV6_SUBTREES failure:
/* Subtree creation failed, probably main tree node /* fn->leaf could be NULL if fn is an intermediate node and we
is orphan. If it is, shoot it. * failed to add the new route to it in both subtree creation
* failure and fib6_add_rt2node() failure case.
* In both cases, fib6_repair_tree() should be called to fix
* fn->leaf.
*/ */
st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn); fib6_repair_tree(info->nl_net, fn);
if (!(rt->dst.flags & DST_NOCACHE)) if (!(rt->dst.flags & DST_NOCACHE))
dst_free(&rt->dst); dst_free(&rt->dst);
return err; return err;
#endif
} }
/* /*

View file

@ -2225,7 +2225,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk); struct irda_sock *self = irda_sk(sk);
struct irda_device_list list; struct irda_device_list list = { 0 };
struct irda_device_info *discoveries; struct irda_device_info *discoveries;
struct irda_ias_set * ias_opt; /* IAS get/query params */ struct irda_ias_set * ias_opt; /* IAS get/query params */
struct ias_object * ias_obj; /* Object in IAS */ struct ias_object * ias_obj; /* Object in IAS */
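Zero-initialising the on-stack struct irda_device_list closes an information leak: fields that were never written were still copied out to userspace, exposing kernel stack contents, and = { 0 } makes every member defined before any copy. The same defensive pattern in a standalone sketch (struct reply is hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct reply {
        unsigned int len;
        char         name[32];   /* only partially filled before being copied out */
    };

    int main(void)
    {
        struct reply r = { 0 };              /* every member defined up front */

        r.len = 3;
        memcpy(r.name, "abc", 3);

        /* The unwritten tail of name[] is guaranteed zero, so copying the
         * whole struct to an untrusted reader no longer exposes stale stack
         * bytes. */
        printf("len=%u last byte=%d\n", r.len, r.name[31]);
        return 0;
    }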

View file

@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE 1 #define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2 #define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4 #define BROADCAST_PROMISC_ONLY 4
static int pfkey_broadcast(struct sk_buff *skb, static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk, int broadcast_flags, struct sock *one_sk,
struct net *net) struct net *net)
{ {
@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
rcu_read_unlock(); rcu_read_unlock();
if (one_sk != NULL) if (one_sk != NULL)
err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
kfree_skb(skb2); kfree_skb(skb2);
kfree_skb(skb); kfree_skb(skb);
@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
hdr = (struct sadb_msg *) pfk->dump.skb->data; hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0; hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc; hdr->sadb_msg_errno = rc;
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk)); &pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL; pfk->dump.skb = NULL;
} }
@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t)); sizeof(uint64_t));
pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0; return 0;
} }
@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
xfrm_state_put(x); xfrm_state_put(x);
pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0; return 0;
} }
@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0; return 0;
} }
@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0; return 0;
} }
@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
return -ENOBUFS; return -ENOBUFS;
} }
pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
sock_net(sk));
return 0; return 0;
} }
@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
sock_net(sk));
} }
static int key_notify_sa_flush(const struct km_event *c) static int key_notify_sa_flush(const struct km_event *c)
@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0; hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0; return 0;
} }
@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid; out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb) if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk)); &pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb; pfk->dump.skb = out_skb;
@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
new_hdr->sadb_msg_errno = 0; new_hdr->sadb_msg_errno = 0;
} }
pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0; return 0;
} }
@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq; out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid; out_hdr->sadb_msg_pid = c->portid;
pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0; return 0;
} }
@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0; err = 0;
out: out:
@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_portid; out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb) if (pfk->dump.skb)
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk)); &pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb; pfk->dump.skb = out_skb;
@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0; hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0; return 0;
} }
@ -2816,7 +2817,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX]; void *ext_hdrs[SADB_EXT_MAX];
int err; int err;
pfkey_broadcast(skb_clone(skb, GFP_KERNEL), pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs)); memset(ext_hdrs, 0, sizeof(ext_hdrs));
@ -3038,7 +3039,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
out_hdr->sadb_msg_seq = 0; out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0; out_hdr->sadb_msg_pid = 0;
pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
return 0; return 0;
} }
@ -3228,7 +3230,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
xfrm_ctx->ctx_len); xfrm_ctx->ctx_len);
} }
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
} }
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@ -3426,7 +3429,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
n_port->sadb_x_nat_t_port_port = sport; n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0; n_port->sadb_x_nat_t_port_reserved = 0;
return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
xs_net(x));
} }
#ifdef CONFIG_NET_KEY_MIGRATE #ifdef CONFIG_NET_KEY_MIGRATE
@ -3618,7 +3622,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
} }
/* broadcast migrate message to sockets */ /* broadcast migrate message to sockets */
pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0; return 0;
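The af_key hunks above thread an explicit allocation-context argument through pfkey_broadcast(), so each call site passes GFP_ATOMIC when it runs in atomic context (timers, notification paths) and GFP_KERNEL where sleeping is allowed. A minimal userspace sketch of the same pattern follows; it is not kernel code and every name in it is invented for illustration.

/*
 * Sketch: the broadcast helper takes the allocation context as an
 * explicit argument so the caller, not the helper, decides whether a
 * blocking allocation is allowed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum alloc_ctx { CTX_MAY_SLEEP, CTX_ATOMIC };   /* stand-ins for GFP_KERNEL / GFP_ATOMIC */

static void *ctx_alloc(size_t n, enum alloc_ctx ctx)
{
        printf("allocating %zu bytes in %s context\n", n,
               ctx == CTX_ATOMIC ? "atomic" : "sleepable");
        return malloc(n);
}

static int broadcast_msg(const char *msg, enum alloc_ctx ctx)
{
        size_t len = strlen(msg) + 1;
        char *copy = ctx_alloc(len, ctx);       /* context chosen by the caller */

        if (!copy)
                return -1;
        memcpy(copy, msg, len);
        printf("broadcast: %s\n", copy);
        free(copy);
        return 0;
}

int main(void)
{
        broadcast_msg("register", CTX_MAY_SLEEP); /* process context, may sleep */
        broadcast_msg("sa-expire", CTX_ATOMIC);   /* timer-like path, must not sleep */
        return 0;
}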
View file
@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
h = nf_ct_expect_dst_hash(net, &expect->tuple); h = nf_ct_expect_dst_hash(net, &expect->tuple);
hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
if (expect_matches(i, expect)) { if (expect_matches(i, expect)) {
if (nf_ct_remove_expect(expect)) if (nf_ct_remove_expect(i))
break; break;
} else if (expect_clash(i, expect)) { } else if (expect_clash(i, expect)) {
ret = -EBUSY; ret = -EBUSY;
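The one-line change above makes the duplicate check remove the already-installed expectation i that matched, rather than the not-yet-inserted expect. A small userspace sketch of that pattern, with invented names (not the conntrack code itself):

/*
 * Sketch: when a duplicate is found while scanning a list, the entry to
 * unlink is the one already on the list, not the new candidate.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct expectation {
        char key[16];
        struct expectation *next;
};

static void drop_duplicate(struct expectation **head, const struct expectation *new)
{
        struct expectation **pp = head;

        while (*pp) {
                struct expectation *i = *pp;

                if (strcmp(i->key, new->key) == 0) {    /* "expect_matches(i, new)" */
                        *pp = i->next;                  /* unlink the existing entry */
                        free(i);
                        return;
                }
                pp = &i->next;
        }
}

int main(void)
{
        struct expectation *head = calloc(1, sizeof(*head));
        struct expectation new = { .key = "ftp-data" };

        strcpy(head->key, "ftp-data");
        drop_duplicate(&head, &new);
        printf("list is %s\n", head ? "non-empty" : "empty");
        return 0;
}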
View file
@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
.tuple = tuple, .tuple = tuple,
.zone = zone .zone = zone
}; };
struct rhlist_head *hl; struct rhlist_head *hl, *h;
hl = rhltable_lookup(&nf_nat_bysource_table, &key, hl = rhltable_lookup(&nf_nat_bysource_table, &key,
nf_nat_bysource_params); nf_nat_bysource_params);
if (!hl)
return 0;
ct = container_of(hl, typeof(*ct), nat_bysource); rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
nf_ct_invert_tuplepr(result, if (in_range(l3proto, l4proto, result, range))
&ct->tuplehash[IP_CT_DIR_REPLY].tuple); return 1;
result->dst = tuple->dst; }
return in_range(l3proto, l4proto, result, range); return 0;
} }
/* For [FUTURE] fragmentation handling, we want the least-used /* For [FUTURE] fragmentation handling, we want the least-used
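The find_appropriate_src() hunk replaces "take the first entry in the bucket" with a walk over every entry that hashed to the same source tuple, returning only when one of them satisfies in_range(). A plain-C sketch of that difference, names invented:

/*
 * Sketch: return the first entry in a hash bucket that passes the range
 * check, instead of testing only the head of the chain and giving up.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mapping { int src_port; };

static bool in_range(const struct mapping *m, int lo, int hi)
{
        return m->src_port >= lo && m->src_port <= hi;
}

static const struct mapping *find_usable(const struct mapping *bucket, size_t n,
                                         int lo, int hi)
{
        /* Testing only bucket[0] would let a head entry outside the
         * requested range hide usable entries further down the chain. */
        for (size_t i = 0; i < n; i++)
                if (in_range(&bucket[i], lo, hi))
                        return &bucket[i];
        return NULL;
}

int main(void)
{
        const struct mapping chain[] = { { 9999 }, { 1200 }, { 1300 } };

        printf("%s\n", find_usable(chain, 3, 1000, 2000) ?
               "usable mapping found" : "no usable mapping");
        return 0;
}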
View file
@ -463,8 +463,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
if (msglen > skb->len) if (msglen > skb->len)
msglen = skb->len; msglen = skb->len;
if (nlh->nlmsg_len < NLMSG_HDRLEN || if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
return; return;
err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy, err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
@ -491,7 +490,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
{ {
struct nlmsghdr *nlh = nlmsg_hdr(skb); struct nlmsghdr *nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < NLMSG_HDRLEN || if (skb->len < NLMSG_HDRLEN ||
nlh->nlmsg_len < NLMSG_HDRLEN ||
skb->len < nlh->nlmsg_len) skb->len < nlh->nlmsg_len)
return; return;
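The nfnetlink hunks rework the receive-path length checks: nfnetlink_rcv() now verifies that skb->len covers the netlink header before nlh->nlmsg_len is trusted, and the batch path keeps only the skb->len check it actually needs. The same defensive ordering in a standalone sketch, with an invented message format:

/*
 * Sketch: make sure the fixed header fits in the buffer before reading
 * its length field, then make sure the claimed length fits as well.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
        uint32_t len;           /* total message length, claimed by sender */
        uint16_t type;
        uint16_t flags;
};

static int msg_valid(const uint8_t *buf, size_t buflen)
{
        struct msg_hdr h;

        if (buflen < sizeof(h))                         /* header must fit first */
                return 0;
        memcpy(&h, buf, sizeof(h));
        if (h.len < sizeof(h) || h.len > buflen)        /* claimed length sane? */
                return 0;
        return 1;
}

int main(void)
{
        uint8_t truncated[2] = { 0xff, 0xff };
        uint8_t ok[8] = { 8, 0, 0, 0, 1, 0, 0, 0 };     /* len = 8 on a little-endian host */

        printf("truncated: %s\n", msg_valid(truncated, sizeof(truncated)) ? "ok" : "rejected");
        printf("well-formed: %s\n", msg_valid(ok, sizeof(ok)) ? "ok" : "rejected");
        return 0;
}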
View file
@ -1337,6 +1337,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
goto out; goto out;
} }
OVS_CB(skb)->acts_origlen = acts->orig_len;
err = do_execute_actions(dp, skb, key, err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len); acts->actions, acts->actions_len);
View file
@ -381,7 +381,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
} }
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
unsigned int hdrlen) unsigned int hdrlen, int actions_attrlen)
{ {
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
@ -398,7 +398,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
/* OVS_PACKET_ATTR_ACTIONS */ /* OVS_PACKET_ATTR_ACTIONS */
if (upcall_info->actions_len) if (upcall_info->actions_len)
size += nla_total_size(upcall_info->actions_len); size += nla_total_size(actions_attrlen);
/* OVS_PACKET_ATTR_MRU */ /* OVS_PACKET_ATTR_MRU */
if (upcall_info->mru) if (upcall_info->mru)
@ -465,7 +465,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
else else
hlen = skb->len; hlen = skb->len;
len = upcall_msg_size(upcall_info, hlen - cutlen); len = upcall_msg_size(upcall_info, hlen - cutlen,
OVS_CB(skb)->acts_origlen);
user_skb = genlmsg_new(len, GFP_ATOMIC); user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) { if (!user_skb) {
err = -ENOMEM; err = -ENOMEM;
View file
@ -98,12 +98,14 @@ struct datapath {
* @input_vport: The original vport packet came in on. This value is cached * @input_vport: The original vport packet came in on. This value is cached
* when a packet is received by OVS. * when a packet is received by OVS.
* @mru: The maximum received fragement size; 0 if the packet is not * @mru: The maximum received fragement size; 0 if the packet is not
* @acts_origlen: The netlink size of the flow actions applied to this skb.
* @cutlen: The number of bytes from the packet end to be removed. * @cutlen: The number of bytes from the packet end to be removed.
* fragmented. * fragmented.
*/ */
struct ovs_skb_cb { struct ovs_skb_cb {
struct vport *input_vport; struct vport *input_vport;
u16 mru; u16 mru;
u16 acts_origlen;
u32 cutlen; u32 cutlen;
}; };
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
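Taken together, the three openvswitch hunks cache the netlink size of the applied actions in the per-skb control block (acts_origlen) when the actions are executed, and size the userspace upcall from that cached value rather than upcall_info->actions_len, so the allocated message is always large enough for the actions attribute that is copied in. A compact userspace sketch of the idea, names invented:

/*
 * Sketch: remember the size of the blob that will later be serialised
 * in per-packet metadata, and size the upcall buffer from that cached
 * value.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt_meta { size_t acts_origlen; };

struct packet {
        struct pkt_meta meta;
        const char *actions;    /* stand-in for the flow's actions blob */
};

static void execute_actions(struct packet *p, const char *actions)
{
        p->actions = actions;
        p->meta.acts_origlen = strlen(actions); /* cached at execute time */
}

static char *build_upcall(const struct packet *p)
{
        /* Sized from the cached length, so copying the same blob below
         * can never overrun the allocation. */
        char *buf = malloc(p->meta.acts_origlen + 1);

        if (buf)
                memcpy(buf, p->actions, p->meta.acts_origlen + 1);
        return buf;
}

int main(void)
{
        struct packet p = { 0 };
        char *upcall;

        execute_actions(&p, "output:1,push_vlan(vid=100)");
        upcall = build_upcall(&p);
        printf("upcall actions: %s\n", upcall ? upcall : "(allocation failed)");
        free(upcall);
        return 0;
}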
View file
@ -41,6 +41,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
{ {
struct xt_tgchk_param par; struct xt_tgchk_param par;
struct xt_target *target; struct xt_target *target;
struct ipt_entry e = {};
int ret = 0; int ret = 0;
target = xt_request_find_target(AF_INET, t->u.user.name, target = xt_request_find_target(AF_INET, t->u.user.name,
@ -52,6 +53,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
memset(&par, 0, sizeof(par)); memset(&par, 0, sizeof(par));
par.net = net; par.net = net;
par.table = table; par.table = table;
par.entryinfo = &e;
par.target = target; par.target = target;
par.targinfo = t->data; par.targinfo = t->data;
par.hook_mask = hook; par.hook_mask = hook;
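The act_ipt hunks place a zeroed struct ipt_entry on the stack and point par.entryinfo at it, so target check hooks that look at par->entryinfo see harmless zeroes instead of a NULL pointer. A minimal sketch of the pattern, with invented names:

/*
 * Sketch: pass a zeroed placeholder object instead of a NULL pointer to
 * a callback that is entitled to look at it.
 */
#include <stdio.h>

struct entry_info { unsigned int flags; };

struct check_param { const struct entry_info *entryinfo; };

static int target_check(const struct check_param *par)
{
        /* A callee may read par->entryinfo without checking for NULL. */
        return par->entryinfo->flags ? -1 : 0;
}

int main(void)
{
        struct entry_info e = { 0 };                    /* zeroed placeholder */
        struct check_param par = { .entryinfo = &e };

        printf("check result: %d\n", target_check(&par));
        return 0;
}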
View file
@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
void qdisc_hash_add(struct Qdisc *q, bool invisible) void qdisc_hash_add(struct Qdisc *q, bool invisible)
{ {
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL(); ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
if (invisible) if (invisible)
Some files were not shown because too many files have changed in this diff.