updating to mainline 4.14.0

Jake Day 2017-11-12 19:29:17 -05:00
parent b278e5dc81
commit fd6f186bf0
118 changed files with 1278 additions and 331 deletions

View file

@ -102,6 +102,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com> Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk> Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>

View file

@ -2113,6 +2113,10 @@ S: J. Obrechtstr 23
S: NL-5216 GP 's-Hertogenbosch S: NL-5216 GP 's-Hertogenbosch
S: The Netherlands S: The Netherlands
N: Ashley Lai
E: ashleydlai@gmail.com
D: IBM VTPM driver
N: Savio Lam N: Savio Lam
E: lam836@cs.cuhk.hk E: lam836@cs.cuhk.hk
D: Author of the dialog utility, foundation D: Author of the dialog utility, foundation
@ -3333,6 +3337,10 @@ S: Braunschweiger Strasse 79
S: 31134 Hildesheim S: 31134 Hildesheim
S: Germany S: Germany
N: Marcel Selhorst
E: tpmdd@selhorst.net
D: TPM driver
N: Darren Senn N: Darren Senn
E: sinster@darkwater.com E: sinster@darkwater.com
D: Whatever I notice needs doing (so far: itimers, /proc) D: Whatever I notice needs doing (so far: itimers, /proc)
@ -4128,7 +4136,6 @@ D: MD driver
D: EISA/sysfs subsystem D: EISA/sysfs subsystem
S: France S: France
# Don't add your name here, unless you really _are_ after Marc # Don't add your name here, unless you really _are_ after Marc
# alphabetically. Leonard used to be very proud of being the # alphabetically. Leonard used to be very proud of being the
# last entry, and he'll get positively pissed if he can't even # last entry, and he'll get positively pissed if he can't even

View file

@ -7745,6 +7745,11 @@ S: Maintained
F: Documentation/scsi/53c700.txt F: Documentation/scsi/53c700.txt
F: drivers/scsi/53c700* F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tobin C. Harding <me@tobin.cc>
S: Maintained
F: scripts/leaking_addresses.pl
LED SUBSYSTEM LED SUBSYSTEM
M: Richard Purdie <rpurdie@rpsys.net> M: Richard Purdie <rpurdie@rpsys.net>
M: Jacek Anaszewski <jacek.anaszewski@gmail.com> M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
@ -10336,7 +10341,6 @@ F: drivers/pci/host/vmd.c
PCI DRIVER FOR MICROSEMI SWITCHTEC PCI DRIVER FOR MICROSEMI SWITCHTEC
M: Kurt Schwemmer <kurt.schwemmer@microsemi.com> M: Kurt Schwemmer <kurt.schwemmer@microsemi.com>
M: Stephen Bates <stephen.bates@microsemi.com>
M: Logan Gunthorpe <logang@deltatee.com> M: Logan Gunthorpe <logang@deltatee.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
S: Maintained S: Maintained
@ -10401,6 +10405,7 @@ F: drivers/pci/dwc/*keystone*
PCI ENDPOINT SUBSYSTEM PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com> M: Kishon Vijay Abraham I <kishon@ti.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
S: Supported S: Supported
@ -10452,6 +10457,15 @@ F: include/linux/pci*
F: arch/x86/pci/ F: arch/x86/pci/
F: arch/x86/kernel/quirks.c F: arch/x86/kernel/quirks.c
PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-pci@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
S: Supported
F: drivers/pci/host/
F: drivers/pci/dwc/
PCIE DRIVER FOR AXIS ARTPEC PCIE DRIVER FOR AXIS ARTPEC
M: Niklas Cassel <niklas.cassel@axis.com> M: Niklas Cassel <niklas.cassel@axis.com>
M: Jesper Nilsson <jesper.nilsson@axis.com> M: Jesper Nilsson <jesper.nilsson@axis.com>
@ -10471,7 +10485,6 @@ F: drivers/pci/host/pci-thunder-*
PCIE DRIVER FOR HISILICON PCIE DRIVER FOR HISILICON
M: Zhou Wang <wangzhou1@hisilicon.com> M: Zhou Wang <wangzhou1@hisilicon.com>
M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
S: Maintained S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
@ -13598,23 +13611,14 @@ F: drivers/platform/x86/toshiba-wmi.c
TPM DEVICE DRIVER TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de> M: Peter Huewe <peterhuewe@gmx.de>
M: Marcel Selhorst <tpmdd@selhorst.net>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
-W: http://tpmdd.sourceforge.net
-L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
-Q: https://patchwork.kernel.org/project/tpmdd-devel/list/
+L: linux-integrity@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.infradead.org/users/jjs/linux-tpmdd.git T: git git://git.infradead.org/users/jjs/linux-tpmdd.git
S: Maintained S: Maintained
F: drivers/char/tpm/ F: drivers/char/tpm/
TPM IBM_VTPM DEVICE DRIVER
M: Ashley Lai <ashleydlai@gmail.com>
W: http://tpmdd.sourceforge.net
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
S: Maintained
F: drivers/char/tpm/tpm_ibmvtpm*
TRACING TRACING
M: Steven Rostedt <rostedt@goodmis.org> M: Steven Rostedt <rostedt@goodmis.org>
M: Ingo Molnar <mingo@redhat.com> M: Ingo Molnar <mingo@redhat.com>

View file

@ -2,7 +2,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 14 PATCHLEVEL = 14
SUBLEVEL = 0 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Fearless Coyote NAME = Fearless Coyote
# *DOCUMENTATION* # *DOCUMENTATION*

View file

@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs); set_fs(fs);
} }
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{ {
unsigned long addr = instruction_pointer(regs); unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs); const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8; const int width = thumb ? 4 : 8;
mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i; int i;
/*
-* We need to switch to kernel mode so that we can use __get_user
-* to safely read from kernel space. Note that we now dump the
-* code first, just in case the backtrace kills us.
+* Note that we now dump the code first, just in case the backtrace
+* kills us.
*/
-fs = get_fs();
-set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) { for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad; unsigned int val, bad;
if (thumb) if (thumb)
-bad = __get_user(val, &((u16 *)addr)[i]);
+bad = get_user(val, &((u16 *)addr)[i]);
else
-bad = __get_user(val, &((u32 *)addr)[i]);
+bad = get_user(val, &((u32 *)addr)[i]);
if (!bad) if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
} }
} }
printk("%sCode: %s\n", lvl, str); printk("%sCode: %s\n", lvl, str);
}
-set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
{
mm_segment_t fs;
if (!user_mode(regs)) {
fs = get_fs();
set_fs(KERNEL_DS);
__dump_instr(lvl, regs);
set_fs(fs);
} else {
__dump_instr(lvl, regs);
}
} }
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
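Reassembled from the hunk above, the change splits the old ARM instruction-dump routine in two: __dump_instr() now reads the code bytes with plain get_user(), and a small dump_instr() wrapper only switches the address limit to KERNEL_DS when the register set being dumped is a kernel-mode one. A rough sketch of the resulting shape (reconstructed from the diff, not copied verbatim):

static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
	/* ... reads the faulting code with get_user(), no set_fs() games ... */
}

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	mm_segment_t fs;

	if (!user_mode(regs)) {
		fs = get_fs();
		set_fs(KERNEL_DS);	/* allow kernel-space reads for the dump */
		__dump_instr(lvl, regs);
		set_fs(fs);
	} else {
		__dump_instr(lvl, regs);
	}
}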

View file

@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7; uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2; uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32; uart_port.iotype = UPIO_MEM32;
uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2; uart_port.regshift = 2;
uart_port.line = 0; uart_port.line = 0;
@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
u32 val; u32 val;
int res; int res;
res = ar7_gpio_init();
if (res)
pr_warn("unable to register gpios: %d\n", res);
res = ar7_register_uarts(); res = ar7_register_uarts();
if (res) if (res)
pr_err("unable to setup uart(s): %d\n", res); pr_err("unable to setup uart(s): %d\n", res);

View file

@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1); ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2); ar7_init_env((struct env_var *)fw_arg2);
console_config(); console_config();
ar7_gpio_init();
} }
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4))) #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))

View file

@ -591,11 +591,11 @@ void __init bmips_cpu_setup(void)
/* Flush and enable RAC */ /* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-__raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-__raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+__raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG); __raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);

View file

@ -646,6 +646,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hnow_v = hpte_new_to_old_v(hnow_v, hnow_r); hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
hnow_r = hpte_new_to_old_r(hnow_r); hnow_r = hpte_new_to_old_r(hnow_r);
} }
/*
* If the HPT is being resized, don't update the HPTE,
* instead let the guest retry after the resize operation is complete.
* The synchronization for hpte_setup_done test vs. set is provided
* by the HPTE lock.
*/
if (!kvm->arch.hpte_setup_done)
goto out_unlock;
if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] || if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
rev->guest_rpte != hpte[2]) rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */ /* HPTE has been changed under us; let the guest retry */

View file

@ -2705,11 +2705,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Hard-disable interrupts, and check resched flag and signals. * Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up * If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s). * and return without going into the guest(s).
* If the hpte_setup_done flag has been cleared, don't go into the
* guest because that means a HPT resize operation is in progress.
*/ */
local_irq_disable(); local_irq_disable();
hard_irq_disable(); hard_irq_disable();
if (lazy_irq_pending() || need_resched() || if (lazy_irq_pending() || need_resched() ||
-recheck_signals(&core_info)) {
+recheck_signals(&core_info) ||
+(!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
local_irq_enable(); local_irq_enable();
vc->vcore_state = VCORE_INACTIVE; vc->vcore_state = VCORE_INACTIVE;
/* Unlock all except the primary vcore */ /* Unlock all except the primary vcore */
@ -3078,7 +3081,7 @@ out:
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{ {
-int n_ceded, i;
+int n_ceded, i, r;
struct kvmppc_vcore *vc; struct kvmppc_vcore *vc;
struct kvm_vcpu *v; struct kvm_vcpu *v;
@ -3132,6 +3135,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) { !signal_pending(current)) {
/* See if the HPT and VRMA are ready to go */
if (!kvm_is_radix(vcpu->kvm) &&
!vcpu->kvm->arch.hpte_setup_done) {
spin_unlock(&vc->lock);
r = kvmppc_hv_setup_htab_rma(vcpu);
spin_lock(&vc->lock);
if (r) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason = 0;
vcpu->arch.ret = r;
break;
}
}
if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
kvmppc_vcore_end_preempt(vc); kvmppc_vcore_end_preempt(vc);
@ -3249,13 +3266,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */ /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb(); smp_mb();
/* On the first time here, set up HTAB and VRMA */
if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
goto out;
}
flush_all_to_thread(current); flush_all_to_thread(current);
/* Save userspace EBB and other register values */ /* Save userspace EBB and other register values */
@ -3303,7 +3313,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
} }
mtspr(SPRN_VRSAVE, user_vrsave); mtspr(SPRN_VRSAVE, user_vrsave);
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY; vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running); atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r; return r;
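Condensed view of the new synchronization across the three KVM HV hunks above (reconstructed from the diff, not verbatim): hpte_setup_done is cleared while the HPT is being resized, and both the page-fault path and the vcpu run loop back off until it is set again, with the HTAB/VRMA setup now done inside the run loop under vc->lock.

/* page-fault path: */
if (!kvm->arch.hpte_setup_done)
	goto out_unlock;	/* HPT resize in flight: let the guest retry */

/* vcpu run loop: */
if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
	spin_unlock(&vc->lock);
	r = kvmppc_hv_setup_htab_rma(vcpu);	/* (re)build HTAB and VRMA */
	spin_lock(&vc->lock);
	if (r)
		break;		/* entry fails with KVM_EXIT_FAIL_ENTRY */
}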

View file

@ -157,8 +157,8 @@ LABEL skip_ %I
.endr .endr
# Find min length # Find min length
-vmovdqa _lens+0*16(state), %xmm0
-vmovdqa _lens+1*16(state), %xmm1
+vmovdqu _lens+0*16(state), %xmm0
+vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@ -178,8 +178,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1 vpsubd %xmm2, %xmm1, %xmm1
-vmovdqa %xmm0, _lens+0*16(state)
-vmovdqa %xmm1, _lens+1*16(state)
+vmovdqu %xmm0, _lens+0*16(state)
+vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1 # "state" and "args" are the same address, arg1
# len is arg2 # len is arg2
@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null jc .return_null
# Find min length # Find min length
-vmovdqa _lens(state), %xmm0
-vmovdqa _lens+1*16(state), %xmm1
+vmovdqu _lens(state), %xmm0
+vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}

View file

@ -155,8 +155,8 @@ LABEL skip_ %I
.endr .endr
# Find min length # Find min length
-vmovdqa _lens+0*16(state), %xmm0
-vmovdqa _lens+1*16(state), %xmm1
+vmovdqu _lens+0*16(state), %xmm0
+vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@ -176,8 +176,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0 vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1 vpsubd %xmm2, %xmm1, %xmm1
-vmovdqa %xmm0, _lens+0*16(state)
-vmovdqa %xmm1, _lens+1*16(state)
+vmovdqu %xmm0, _lens+0*16(state)
+vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1 # "state" and "args" are the same address, arg1
# len is arg2 # len is arg2
@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
jc .return_null jc .return_null
# Find min length # Find min length
-vmovdqa _lens(state), %xmm0
-vmovdqa _lens+1*16(state), %xmm1
+vmovdqu _lens(state), %xmm0
+vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A} vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C} vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}

View file

@ -253,7 +253,7 @@ extern int force_personality32;
* space open for things that want to use the area for 32-bit pointers. * space open for things that want to use the area for 32-bit pointers.
*/ */
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
-(TASK_SIZE / 3 * 2))
+(DEFAULT_MAP_WINDOW / 3 * 2))
/* This yields a mask that user programs can use to figure out what /* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space, instruction set this CPU supports. This could be done in user space,

View file

@ -22,7 +22,7 @@ obj-y += common.o
obj-y += rdrand.o obj-y += rdrand.o
obj-y += match.o obj-y += match.o
obj-y += bugs.o obj-y += bugs.o
-obj-y += aperfmperf.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o

View file

@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
s64 time_delta = ktime_ms_delta(now, s->time); s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags; unsigned long flags;
/* Don't bother re-computing within the cache threshold time. */
if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return;
local_irq_save(flags); local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf); rdmsrl(MSR_IA32_MPERF, mperf);
@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
unsigned int arch_freq_get_on_cpu(int cpu) unsigned int arch_freq_get_on_cpu(int cpu)
{ {
s64 time_delta;
unsigned int khz; unsigned int khz;
if (!cpu_khz) if (!cpu_khz)
@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!static_cpu_has(X86_FEATURE_APERFMPERF)) if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0; return 0;
/* Don't bother re-computing within the cache threshold time. */
time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
khz = per_cpu(samples.khz, cpu);
if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return khz;
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1); smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
khz = per_cpu(samples.khz, cpu); khz = per_cpu(samples.khz, cpu);
if (khz) if (khz)

View file

@ -78,10 +78,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode); seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) { if (cpu_has(c, X86_FEATURE_TSC)) {
-unsigned int freq = arch_freq_get_on_cpu(cpu);
-if (!freq)
-freq = cpufreq_quick_get(cpu);
+unsigned int freq = cpufreq_quick_get(cpu);
if (!freq) if (!freq)
freq = cpu_khz; freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n", seq_printf(m, "cpu MHz\t\t: %u.%03u\n",

View file

@ -92,8 +92,6 @@ static const __initdata struct idt_data def_idts[] = {
INTG(X86_TRAP_DF, double_fault), INTG(X86_TRAP_DF, double_fault),
#endif #endif
INTG(X86_TRAP_DB, debug), INTG(X86_TRAP_DB, debug),
INTG(X86_TRAP_NMI, nmi),
INTG(X86_TRAP_BP, int3),
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
INTG(X86_TRAP_MC, &machine_check), INTG(X86_TRAP_MC, &machine_check),

View file

@ -193,6 +193,12 @@ static void smp_callin(void)
*/ */
smp_store_cpu_info(cpuid); smp_store_cpu_info(cpuid);
/*
* The topology information must be up to date before
* calibrate_delay() and notify_cpu_starting().
*/
set_cpu_sibling_map(raw_smp_processor_id());
/* /*
* Get our bogomips. * Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to * Update loops_per_jiffy in cpu_data. Previous call to
@ -203,11 +209,6 @@ static void smp_callin(void)
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid); pr_debug("Stack at about %p\n", &cpuid);
/*
* This must be done before setting cpu_online_mask
* or calling notify_cpu_starting.
*/
set_cpu_sibling_map(raw_smp_processor_id());
wmb(); wmb();
notify_cpu_starting(cpuid); notify_cpu_starting(cpuid);

View file

@ -209,9 +209,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
if (fixup_exception(regs, trapnr)) if (fixup_exception(regs, trapnr))
return 0; return 0;
if (fixup_bug(regs, trapnr))
return 0;
tsk->thread.error_code = error_code; tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr; tsk->thread.trap_nr = trapnr;
die(str, regs, error_code); die(str, regs, error_code);
@ -292,6 +289,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
/*
* WARN*()s end up here; fix them up before we call the
* notifier chain.
*/
if (!user_mode(regs) && fixup_bug(regs, trapnr))
return;
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) { NOTIFY_STOP) {
cond_local_irq_enable(regs); cond_local_irq_enable(regs);
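Taken together, the two hunks above move the fixup_bug() handling out of do_trap_no_signal() and run it at the top of do_error_trap(), so kernel-mode WARN()s are patched up before the die notifier chain and the signal path ever see them. Sketch of the new ordering (condensed from the diff):

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/* WARN*()s end up here; fix them up before we call the notifier chain. */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		/* ... deliver the signal as before ... */
	}
}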

View file

@ -1346,12 +1346,10 @@ void __init tsc_init(void)
unsigned long calibrate_delay_is_known(void) unsigned long calibrate_delay_is_known(void)
{ {
int sibling, cpu = smp_processor_id(); int sibling, cpu = smp_processor_id();
-struct cpumask *mask = topology_core_cpumask(cpu);
+int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+const struct cpumask *mask = topology_core_cpumask(cpu);
-if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
-return 0;
-if (!mask)
+if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu); sibling = cpumask_any_but(mask, cpu);

View file

@ -279,7 +279,7 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(long))) if (!stack_access_ok(state, addr, sizeof(long)))
return false; return false;
-*val = READ_ONCE_TASK_STACK(state->task, *(unsigned long *)addr);
+*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
return true; return true;
} }

View file

@ -40,7 +40,7 @@ static char sme_cmdline_off[] __initdata = "off";
* section is later cleared. * section is later cleared.
*/ */
u64 sme_me_mask __section(.data) = 0; u64 sme_me_mask __section(.data) = 0;
-EXPORT_SYMBOL_GPL(sme_me_mask);
+EXPORT_SYMBOL(sme_me_mask);
/* Buffer used for early in-place encryption by BSP, no locking needed */ /* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE); static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

View file

@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa); eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
-if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
-    __this_cpu_read(cpu_info.x86_model) == 15) {
+if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+    boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2; eax.split.version_id = 2;
eax.split.num_counters = 2; eax.split.num_counters = 2;
eax.split.bit_width = 40; eax.split.bit_width = 40;

View file

@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
unsigned int cryptlen = req->cryptlen; unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag; u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata; u8 *odata = pctx->odata;
-u8 *iv = req->iv;
+u8 *iv = pctx->idata;
int err; int err;
cryptlen -= authsize; cryptlen -= authsize;
@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst) if (req->src != req->dst)
dst = pctx->dst; dst = pctx->dst;
memcpy(iv, req->iv, 16);
skcipher_request_set_tfm(skreq, ctx->ctr); skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags, skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req); crypto_ccm_decrypt_done, req);
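The point of the hunk above: the CTR skcipher advances the IV it is given in place, and the operation can complete asynchronously, so handing it req->iv directly corrupts the caller's buffer. After the change the IV lives in the request context (pctx->idata) and req->iv is only read once. Condensed sketch, not the full function:

u8 *iv = pctx->idata;		/* per-request buffer instead of req->iv */

memcpy(iv, req->iv, 16);	/* the CTR walk now advances this copy only */
skcipher_request_set_tfm(skreq, ctx->ctr);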

View file

@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0; return 0;
} }
static bool acpi_sleep_no_lps0;
static int __init init_no_lps0(const struct dmi_system_id *d)
{
acpi_sleep_no_lps0 = true;
return 0;
}
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{ {
.callback = init_old_suspend_ordering, .callback = init_old_suspend_ordering,
@ -343,6 +351,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
}, },
}, },
/*
* https://bugzilla.kernel.org/show_bug.cgi?id=196907
* Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
* S0 Idle firmware interface.
*/
{
.callback = init_no_lps0,
.ident = "Dell XPS13 9360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
},
},
{}, {},
}; };
@ -485,6 +506,7 @@ static void acpi_pm_end(void)
} }
#else /* !CONFIG_ACPI_SLEEP */ #else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0 #define acpi_target_sleep_state ACPI_STATE_S0
#define acpi_sleep_no_lps0 (false)
static inline void acpi_sleep_dmi_check(void) {} static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */ #endif /* CONFIG_ACPI_SLEEP */
@ -863,6 +885,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle) if (lps0_device_handle)
return 0; return 0;
if (acpi_sleep_no_lps0) {
acpi_handle_info(adev->handle,
"Low Power S0 Idle interface disabled\n");
return 0;
}
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0; return 0;
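The ACPI hunks above add a standard DMI-quirk arrangement: a match on the Dell XPS 13 9360 sets a flag, and lps0_device_attach() bails out early when the flag is set, so that machine uses the regular suspend path instead of the Low Power S0 Idle interface. Condensed sketch of the plumbing (not verbatim):

static bool acpi_sleep_no_lps0;

static int __init init_no_lps0(const struct dmi_system_id *d)
{
	acpi_sleep_no_lps0 = true;	/* set when the DMI entry matches */
	return 0;
}

/* ... inside lps0_device_attach(): ... */
if (acpi_sleep_no_lps0) {
	acpi_handle_info(adev->handle, "Low Power S0 Idle interface disabled\n");
	return 0;			/* fall back to the ordinary suspend handling */
}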

View file

@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent. * from the parent.
*/ */
page_count = (u32)calc_pages_for(0, length); page_count = (u32)calc_pages_for(0, length);
-pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) { if (IS_ERR(pages)) {
result = PTR_ERR(pages); result = PTR_ERR(pages);
pages = NULL; pages = NULL;
@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/ */
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size); page_count = (u32)calc_pages_for(0, size);
-pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) { if (IS_ERR(pages)) {
ret = PTR_ERR(pages); ret = PTR_ERR(pages);
goto fail_stat_request; goto fail_stat_request;

View file

@ -2094,6 +2094,11 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err; goto err;
} }
if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
err = -EINVAL;
goto err;
}
syncobj = drm_syncobj_find(file, fence.handle); syncobj = drm_syncobj_find(file, fence.handle);
if (!syncobj) { if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n"); DRM_DEBUG("Invalid syncobj handle provided\n");
@ -2101,6 +2106,9 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err; goto err;
} }
BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
} }
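A quick reading of the new flag check: with I915_EXEC_FENCE_WAIT = 1<<0 and I915_EXEC_FENCE_SIGNAL = 1<<1, the uapi hunk further down defines __I915_EXEC_FENCE_UNKNOWN_FLAGS as -(I915_EXEC_FENCE_SIGNAL << 1), i.e. every bit from bit 2 upward. The execbuffer path rejects any such bit, and the BUILD_BUG_ON asserts the known flags fit in the low pointer bits consumed by ptr_pack_bits(). Worked out as a sketch:

#define I915_EXEC_FENCE_WAIT		(1 << 0)
#define I915_EXEC_FENCE_SIGNAL		(1 << 1)
/* -(1 << 2) is 0xfffffffc as a u32: everything above the two known flags */
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS	(-(I915_EXEC_FENCE_SIGNAL << 1))

if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
	return -EINVAL;		/* sketch: the real code sets err and jumps to the err label */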

View file

@ -832,10 +832,14 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
} }
} }
-struct sgt_dma {
+static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-};
+} sgt_dma(struct i915_vma *vma) {
+struct scatterlist *sg = vma->pages->sgl;
+dma_addr_t addr = sg_dma_address(sg);
+return (struct sgt_dma) { sg, addr, addr + sg->length };
+}
struct gen8_insert_pte { struct gen8_insert_pte {
u16 pml4e; u16 pml4e;
@ -916,11 +920,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
u32 unused) u32 unused)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-struct sgt_dma iter = {
-.sg = vma->pages->sgl,
-.dma = sg_dma_address(iter.sg),
-.max = iter.dma + iter.sg->length,
-};
+struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
@ -933,11 +933,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
u32 unused) u32 unused)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-struct sgt_dma iter = {
-.sg = vma->pages->sgl,
-.dma = sg_dma_address(iter.sg),
-.max = iter.dma + iter.sg->length,
-};
+struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
@ -1632,13 +1628,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags); const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
-struct sgt_dma iter;
+struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
-iter.sg = vma->pages->sgl;
-iter.dma = sg_dma_address(iter.sg);
-iter.max = iter.dma + iter.sg->length;
do { do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

View file

@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev * allocation taken by fbdev
*/ */
if (!(dev_priv->capabilities & SVGA_CAP_3D)) if (!(dev_priv->capabilities & SVGA_CAP_3D))
-mem_size *= 2;
+mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem = dev_priv->prim_bb_mem =

View file

@ -224,7 +224,7 @@ out:
return ret; return ret;
} }
-static struct dma_fence_ops vmw_fence_ops = {
+static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name, .get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name, .get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling, .enable_signaling = vmw_fence_enable_signaling,

View file

@ -867,11 +867,16 @@ static void msf_from_bcd(struct atapi_msf *msf)
int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
{ {
struct cdrom_info *info = drive->driver_data; struct cdrom_info *info = drive->driver_data;
-struct cdrom_device_info *cdi = &info->devinfo;
+struct cdrom_device_info *cdi;
unsigned char cmd[BLK_MAX_CDB]; unsigned char cmd[BLK_MAX_CDB];
ide_debug_log(IDE_DBG_FUNC, "enter"); ide_debug_log(IDE_DBG_FUNC, "enter");
if (!info)
return -EIO;
cdi = &info->devinfo;
memset(cmd, 0, BLK_MAX_CDB); memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_TEST_UNIT_READY; cmd[0] = GPCMD_TEST_UNIT_READY;

View file

@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 }, { "ELAN0605", 0 },
{ "ELAN0609", 0 }, { "ELAN0609", 0 },
{ "ELAN060B", 0 }, { "ELAN060B", 0 },
{ "ELAN060C", 0 },
{ "ELAN0611", 0 }, { "ELAN0611", 0 },
{ "ELAN1000", 0 }, { "ELAN1000", 0 },
{ } { }

View file

@ -312,7 +312,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.dev = &client->dev; rmi_smb->xport.dev = &client->dev;
rmi_smb->xport.pdata = *pdata; rmi_smb->xport.pdata = *pdata;
rmi_smb->xport.pdata.irq = client->irq; rmi_smb->xport.pdata.irq = client->irq;
rmi_smb->xport.proto_name = "smb2"; rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops; rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb); smbus_version = rmi_smb_get_version(rmi_smb);
@ -322,7 +322,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d", rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version); smbus_version);
-if (smbus_version != 2) {
+if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n", dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version); smbus_version);
return -ENODEV; return -ENODEV;

View file

@ -531,6 +531,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_drvdata(input_dev, ts); input_set_drvdata(input_dev, ts);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_capability(input_dev, EV_KEY, BTN_TOUCH); input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, input_set_abs_params(input_dev, ABS_X,

View file

@ -2042,6 +2042,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE; slave->new_link = BOND_LINK_NOCHANGE;
slave->link_new_state = slave->link;
link_state = bond_check_dev_link(bond, slave->dev, 0); link_state = bond_check_dev_link(bond, slave->dev, 0);
@ -3253,7 +3254,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash ^= (hash >> 16); hash ^= (hash >> 16);
hash ^= (hash >> 8); hash ^= (hash >> 8);
-return hash;
+return hash >> 1;
} }
/*-------------------------- Device entry points ----------------------------*/ /*-------------------------- Device entry points ----------------------------*/

View file

@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break; break;
case BOSCH_D_CAN: case BOSCH_D_CAN:
priv->regs = reg_map_d_can; priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;

View file

@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break; break;
case BOSCH_D_CAN: case BOSCH_D_CAN:
priv->regs = reg_map_d_can; priv->regs = reg_map_d_can;
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32; priv->read_reg32 = d_can_plat_read_reg32;

View file

@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME); priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */ /* Configure transmitter delay */
-tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
-writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
-       priv->base + IFI_CANFD_TDELAY);
+tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+tdc &= IFI_CANFD_TDELAY_MASK;
+writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
} }
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,

View file

@ -29,14 +29,19 @@
#include "peak_canfd_user.h" #include "peak_canfd_user.h"
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards"); MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
#define PCIEFD_DRV_NAME "peak_pciefd" #define PCIEFD_DRV_NAME "peak_pciefd"
#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ #define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ #define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
#define PCAN_M2_ID 0x001a /* for M2 slot cards */
/* PEAK PCIe board access description */ /* PEAK PCIe board access description */
#define PCIEFD_BAR0_SIZE (64 * 1024) #define PCIEFD_BAR0_SIZE (64 * 1024)
@ -203,6 +208,11 @@ struct pciefd_board {
/* supported device ids. */ /* supported device ids. */
static const struct pci_device_id peak_pciefd_tbl[] = { static const struct pci_device_id peak_pciefd_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,} {0,}
}; };

View file

@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
} }
stats->rx_over_errors++; stats->rx_over_errors++;
stats->rx_errors++; stats->rx_errors++;
/* reset the CAN IP by entering reset mode
* ignoring timeout error
*/
set_reset_mode(dev);
set_normal_mode(dev);
/* clear bit */ /* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG); sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
} }
@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev); netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX); can_led_event(dev, CAN_LED_EVENT_TX);
} }
-if (isrc & SUN4I_INT_RBUF_VLD) {
-/* receive interrupt */
+if ((isrc & SUN4I_INT_RBUF_VLD) &&
+    !(isrc & SUN4I_INT_DATA_OR)) {
+/* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) { while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */ /* RX buffer is not empty */
sun4i_can_rx(dev); sun4i_can_rx(dev);

View file

@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01 #define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2D
+#define T4FW_VERSION_MICRO 0x3F
#define T4FW_VERSION_BUILD 0x00 #define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01 #define T4FW_MIN_VERSION_MAJOR 0x01
@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01 #define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2D
+#define T5FW_VERSION_MICRO 0x3F
#define T5FW_VERSION_BUILD 0x00 #define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00
@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01 #define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MICRO 0x3F
#define T6FW_VERSION_BUILD 0x00 #define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00

View file

@ -6747,6 +6747,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) { for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i; struct mvpp2_queue_vector *qv = port->qvecs + i;
if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err) if (err)
goto err; goto err;
@ -6776,6 +6779,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i; struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL); irq_set_affinity_hint(qv->irq, NULL);
irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv); free_irq(qv->irq, qv);
} }
} }

View file

@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx,
list_splice_init(&priv->waiting_events_list, &temp); list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context) if (!dev_ctx->context)
goto out; goto out;
-list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+list_for_each_entry_safe(de, n, &temp, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out: out:

View file

@ -67,7 +67,7 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6

View file

@ -365,21 +365,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
struct mlx5e_l2_hash_node *hn) struct mlx5e_l2_hash_node *hn)
{ {
u8 action = hn->action; u8 action = hn->action;
u8 mac_addr[ETH_ALEN];
int l2_err = 0; int l2_err = 0;
ether_addr_copy(mac_addr, hn->ai.addr);
switch (action) { switch (action) {
case MLX5E_ACTION_ADD: case MLX5E_ACTION_ADD:
mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
-if (!is_multicast_ether_addr(hn->ai.addr)) {
-l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
+if (!is_multicast_ether_addr(mac_addr)) {
+l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
hn->mpfs = !l2_err; hn->mpfs = !l2_err;
} }
hn->action = MLX5E_ACTION_NONE; hn->action = MLX5E_ACTION_NONE;
break; break;
case MLX5E_ACTION_DEL: case MLX5E_ACTION_DEL:
-if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
-l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
+if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
+l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
mlx5e_del_l2_flow_rule(priv, &hn->ai); mlx5e_del_l2_flow_rule(priv, &hn->ai);
mlx5e_del_l2_from_hash(hn); mlx5e_del_l2_from_hash(hn);
break; break;
@ -387,7 +390,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
if (l2_err) if (l2_err)
netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
-action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
+action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
} }
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)

View file

@ -215,22 +215,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info) struct mlx5e_dma_info *dma_info)
{ {
struct page *page;
if (mlx5e_rx_cache_get(rq, dma_info)) if (mlx5e_rx_cache_get(rq, dma_info))
return 0; return 0;
-page = dev_alloc_pages(rq->buff.page_order);
-if (unlikely(!page))
+dma_info->page = dev_alloc_pages(rq->buff.page_order);
+if (unlikely(!dma_info->page))
return -ENOMEM;
-dma_info->addr = dma_map_page(rq->pdev, page, 0,
+dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-put_page(page);
+put_page(dma_info->page);
+dma_info->page = NULL;
return -ENOMEM; return -ENOMEM;
} }
dma_info->page = page;
return 0; return 0;
} }

View file

@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi); napi);
bool busy = false; bool busy = false;
-int work_done;
+int work_done = 0;
int i; int i;
for (i = 0; i < c->num_tc; i++) for (i = 0; i < c->num_tc; i++)
@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (c->xdp) if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
-work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-busy |= work_done == budget;
+if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+busy |= work_done == budget;
+}
busy |= c->rq.post_wqes(&c->rq); busy |= c->rq.post_wqes(&c->rq);
if (busy) { if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c))) if (likely(mlx5e_channel_no_affinity_change(c)))
return budget; return budget;
-if (work_done == budget)
+if (budget && work_done == budget)
work_done--;
} }
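For context on the hunk above: NAPI handlers can be invoked with budget 0 (netpoll), and in that case they must not process RX. The change initializes work_done, skips the RX completion queue poll entirely when budget is 0, and only applies the work_done-- adjustment when a poll actually ran. Condensed:

int work_done = 0;

if (likely(budget)) {			/* budget == 0: TX/XDP cleanup only */
	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
	busy |= work_done == budget;
}
/* ... */
if (budget && work_done == budget)
	work_done--;			/* never report the full budget from here */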

View file

@ -1482,9 +1482,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
return -EAGAIN; return -EAGAIN;
} }
/* Panic tear down fw command will stop the PCI bus communication
* with the HCA, so the health polll is no longer needed.
*/
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev);
ret = mlx5_cmd_force_teardown_hca(dev); ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) { if (ret) {
mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret); mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
mlx5_start_health_poll(dev);
return ret; return ret;
} }

View file

@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
struct usbnet *dev = usb_get_intfdata(intf); struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv; struct asix_common_private *priv = dev->driver_priv;
-if (priv->suspend)
+if (priv && priv->suspend)
priv->suspend(dev); priv->suspend(dev);
return usbnet_suspend(intf, message); return usbnet_suspend(intf, message);
@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf); struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv; struct asix_common_private *priv = dev->driver_priv;
-if (priv->resume)
+if (priv && priv->resume)
priv->resume(dev); priv->resume(dev);
return usbnet_resume(intf); return usbnet_resume(intf);

View file

@ -230,7 +230,7 @@ skip:
goto bad_desc; goto bad_desc;
} }
-if (header.usb_cdc_ether_desc) {
+if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize); dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
/* because of Zaurus, we may be ignoring the host /* because of Zaurus, we may be ignoring the host
* side link address we were given. * side link address we were given.

View file

@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1; return 1;
} }
if (rawip) { if (rawip) {
skb_reset_mac_header(skb);
skb->dev = dev->net; /* normally set by eth_type_trans */ skb->dev = dev->net; /* normally set by eth_type_trans */
skb->protocol = proto; skb->protocol = proto;
return 1; return 1;
@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
} }
/* errors aren't fatal - we can live with the dynamic address */ /* errors aren't fatal - we can live with the dynamic address */
-if (cdc_ether) {
+if (cdc_ether && cdc_ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize); dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
} }

View file

@ -2685,7 +2685,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
} }
sdev->sdev_state = state; sdev->sdev_state = state;
sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
return 0; return 0;
illegal: illegal:
@ -3109,7 +3108,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
case SDEV_BLOCK: case SDEV_BLOCK:
case SDEV_TRANSPORT_OFFLINE: case SDEV_TRANSPORT_OFFLINE:
sdev->sdev_state = new_state; sdev->sdev_state = new_state;
sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break; break;
case SDEV_CREATED_BLOCK: case SDEV_CREATED_BLOCK:
if (new_state == SDEV_TRANSPORT_OFFLINE || if (new_state == SDEV_TRANSPORT_OFFLINE ||
@ -3117,7 +3115,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
sdev->sdev_state = new_state; sdev->sdev_state = new_state;
else else
sdev->sdev_state = SDEV_CREATED; sdev->sdev_state = SDEV_CREATED;
sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break; break;
case SDEV_CANCEL: case SDEV_CANCEL:
case SDEV_OFFLINE: case SDEV_OFFLINE:

View file

@ -556,11 +556,8 @@ int srp_reconnect_rport(struct srp_rport *rport)
*/ */
shost_for_each_device(sdev, shost) { shost_for_each_device(sdev, shost) {
mutex_lock(&sdev->state_mutex); mutex_lock(&sdev->state_mutex);
-if (sdev->sdev_state == SDEV_OFFLINE) {
+if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
-sysfs_notify(&sdev->sdev_gendev.kobj,
-	     NULL, "state");
-}
mutex_unlock(&sdev->state_mutex); mutex_unlock(&sdev->state_mutex);
} }
} else if (rport->state == SRP_RPORT_RUNNING) { } else if (rport->state == SRP_RPORT_RUNNING) {

View file

@ -3770,6 +3770,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
#endif #endif
} }
static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
skb->ipvs_property = 0;
#endif
}
/* Note: This doesn't put any conntrack and bridge info in dst. */ /* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy) bool copy)

View file

@ -213,6 +213,11 @@ static inline struct ctl_table_header *register_sysctl_paths(
return NULL; return NULL;
} }
static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
{
return NULL;
}
static inline void unregister_sysctl_table(struct ctl_table_header * table) static inline void unregister_sysctl_table(struct ctl_table_header * table)
{ {
} }

View file

@ -14,7 +14,6 @@
struct tcf_idrinfo { struct tcf_idrinfo {
spinlock_t lock; spinlock_t lock;
struct idr action_idr; struct idr action_idr;
struct net *net;
}; };
struct tc_action_ops; struct tc_action_ops;
@ -106,7 +105,7 @@ struct tc_action_net {
static inline static inline
int tc_action_net_init(struct tc_action_net *tn, int tc_action_net_init(struct tc_action_net *tn,
-const struct tc_action_ops *ops, struct net *net)
+const struct tc_action_ops *ops)
{ {
int err = 0; int err = 0;
@ -114,7 +113,6 @@ int tc_action_net_init(struct tc_action_net *tn,
if (!tn->idrinfo) if (!tn->idrinfo)
return -ENOMEM; return -ENOMEM;
tn->ops = ops; tn->ops = ops;
tn->idrinfo->net = net;
spin_lock_init(&tn->idrinfo->lock); spin_lock_init(&tn->idrinfo->lock);
idr_init(&tn->idrinfo->action_idr); idr_init(&tn->idrinfo->action_idr);
return err; return err;

View file

@ -94,6 +94,7 @@ struct tcf_exts {
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
int nr_actions; int nr_actions;
struct tc_action **actions; struct tc_action **actions;
struct net *net;
#endif #endif
/* Map to export classifier specific extension TLV types to the /* Map to export classifier specific extension TLV types to the
* generic extensions API. Unsupported extensions must be set to 0. * generic extensions API. Unsupported extensions must be set to 0.
@ -107,6 +108,7 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
exts->type = 0; exts->type = 0;
exts->nr_actions = 0; exts->nr_actions = 0;
exts->net = NULL;
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL); GFP_KERNEL);
if (!exts->actions) if (!exts->actions)
@ -117,6 +119,28 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
return 0; return 0;
} }
/* Return false if the netns is being destroyed in cleanup_net(). Callers
* need to do cleanup synchronously in this case, otherwise may race with
* tc_action_net_exit(). Return true for other cases.
*/
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
exts->net = maybe_get_net(exts->net);
return exts->net != NULL;
#else
return true;
#endif
}
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
if (exts->net)
put_net(exts->net);
#endif
}
static inline void tcf_exts_to_list(const struct tcf_exts *exts, static inline void tcf_exts_to_list(const struct tcf_exts *exts,
struct list_head *actions) struct list_head *actions)
{ {
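The two helpers added above exist so a classifier can pin the netns while destruction of its tcf_exts is deferred to RCU or a workqueue; if maybe_get_net() fails because the netns is already being torn down in cleanup_net(), the caller has to free synchronously instead. A hypothetical caller, for illustration only (the some_cls_* names are made up; only tcf_exts_get_net(), tcf_exts_put_net() and tcf_exts_destroy() come from the kernel):

static void some_cls_delete(struct tcf_proto *tp, struct some_filter *f)
{
	if (tcf_exts_get_net(&f->exts))
		call_rcu(&f->rcu, some_cls_destroy_rcu);	/* deferred: netns pinned */
	else
		some_cls_destroy(f);				/* netns dying: free now */
}

static void some_cls_destroy_rcu(struct rcu_head *head)
{
	struct some_filter *f = container_of(head, struct some_filter, rcu);

	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);	/* drop the reference taken at delete time */
	kfree(f);
}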

View file

@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200 #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */ /* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */ /* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff #define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff

View file

@ -90,6 +90,8 @@ struct snd_timer {
struct list_head ack_list_head; struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */ struct list_head sack_list_head; /* slow ack list head */
struct tasklet_struct task_queue; struct tasklet_struct task_queue;
int max_instances; /* upper limit of timer instances */
int num_instances; /* current number of timer instances */
}; };
struct snd_timer_instance { struct snd_timer_instance {

View file

@ -829,6 +829,7 @@ struct drm_i915_gem_exec_fence {
#define I915_EXEC_FENCE_WAIT (1<<0) #define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1) #define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
__u32 flags; __u32 flags;
}; };

View file

@ -0,0 +1,3 @@
[spatch]
options = --timeout 200
options = --use-gitgrep

View file

@ -1,7 +1,117 @@
#
-# Generated files
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
#
-config_data.h
-config_data.gz
-timeconst.h
-hz.bc
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
# Normal rules
#
.*
*.o
*.o.*
*.a
*.s
*.ko
*.so
*.so.dbg
*.mod.c
*.i
*.lst
*.symtypes
*.order
*.elf
*.bin
*.tar
*.gz
*.bz2
*.lzma
*.xz
*.lz4
*.lzo
*.patch
*.gcno
*.ll
modules.builtin
Module.symvers
*.dwo
*.su
*.c.[012]*.*
#
# Top-level generic files
#
/tags
/TAGS
/linux
/vmlinux
/vmlinux.32
/vmlinux-gdb.py
/vmlinuz
/System.map
/Module.markers
#
# Debian directory (make deb-pkg)
#
/debian/
#
# tar directory (make tar*-pkg)
#
/tar-install/
#
# git files that we don't want to ignore even if they are dot-files
#
!.gitignore
!.mailmap
!.cocciconfig
#
# Generated include files
#
include/config
include/generated
arch/*/include/generated
# stgit generated dirs
patches-*
# quilt's files
patches
series
# cscope files
cscope.*
ncscope.*
# gnu global files
GPATH
GRTAGS
GSYMS
GTAGS
# id-utils files
ID
*.orig
*~
\#*#
#
# Leavings from module signing
#
extra_certificates
signing_key.pem
signing_key.priv
signing_key.x509
x509.genkey
# Kconfig presets
all.config
# Kdevelop4
*.kdev4

kernel/kernel/.mailmap (new file, 191 lines)

@@ -0,0 +1,191 @@
#
# This list is used by git-shortlog to fix a few botched name translations
# in the git archive, either because the author's full name was messed up
# and/or not always written the same way, making contributions from the
# same person appearing not to be so or badly displayed.
#
# repo-abbrev: /pub/scm/linux/kernel/git/
#
Aaron Durbin <adurbin@google.com>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adrian Bunk <bunk@stusta.de>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
Archit Taneja <archit@ti.com>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
Boris Brezillon <boris.brezillon@free-electrons.com>
Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Christoph Hellwig <hch@lst.de>
Christophe Ricard <christophe.ricard@gmail.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
David Brownell <david-b@pacbell.net>
David Woodhouse <dwmw2@shinybook.infradead.org>
Deng-Cheng Zhu <dengcheng.zhu@mips.com> <dengcheng.zhu@imgtec.com>
Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
Domen Puncer <domen@coderock.org>
Douglas Gilbert <dougg@torque.net>
Ed L. Cashin <ecashin@coraid.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
Felipe W Damasio <felipewd@terra.com.br>
Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
James Hogan <jhogan@kernel.org> <james@albanarts.com>
James Ketrenos <jketreno@io.(none)>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<josh@joshtriplett.org> <josh@freedesktop.org>
<josh@joshtriplett.org> <josh@kernel.org>
<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
<josh@joshtriplett.org> <josht@us.ibm.com>
<josh@joshtriplett.org> <josht@vnet.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Matthieu CASTET <castet.matthieu@free.fr>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@infradead.org>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@redhat.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <m.chehab@samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@osg.samsung.com>
Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@s-opensource.com>
Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
Mitesh shah <mshah@teja.com>
Mohit Kumar <mohit.kumar@st.com> <mohit.kumar.dhaka@gmail.com>
Morten Welinder <terra@gnome.org>
Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
Peter A Jonsson <pj@ludd.ltu.se>
Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sarangdhar Joshi <spjoshi@codeaurora.org>
Sam Ravnborg <sam@mars.ravnborg.org>
Santosh Shilimkar <ssantosh@kernel.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
Thomas Pedersen <twp@codeaurora.org>
Tony Luck <tony.luck@intel.com>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>


@@ -649,6 +649,7 @@ static int sugov_start(struct cpufreq_policy *policy)
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
@@ -714,11 +715,6 @@ struct cpufreq_governor *cpufreq_default_governor(void)
static int __init sugov_register(void)
{
-int cpu;
-for_each_possible_cpu(cpu)
-per_cpu(sugov_cpu, cpu).cpu = cpu;
return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);


@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
struct worker_pool;
@@ -60,7 +61,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
-if (current->flags & PF_WQ_WORKER)
if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}


@@ -228,7 +228,7 @@ next_op:
hdr = 2;
/* Extract a tag from the data */
-if (unlikely(dp >= datalen - 1))
if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@ next_op:
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
-if (unlikely(dp >= datalen - n))
if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
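Both rewritten checks compute the number of bytes remaining (datalen - dp) before comparing, rather than subtracting from datalen on the right-hand side: the quantities are unsigned, so the old form can wrap around and defeat the overrun check. A small stand-alone illustration of the difference (user-space C, not the kernel decoder itself; it assumes the decoder's invariant dp <= datalen):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t dp = 0, datalen = 0;	/* no bytes left to read */

		/* Old-style check: datalen - 1 wraps to SIZE_MAX, so the
		 * overrun is not detected. Prints 0.
		 */
		printf("%d\n", dp >= datalen - 1);

		/* Reworked check: the remaining length cannot wrap while
		 * dp <= datalen, so the overrun is caught. Prints 1.
		 */
		printf("%d\n", datalen - dp < 2);
		return 0;
	}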


@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
dev->name);
vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
}
if (event == NETDEV_DOWN &&
(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
vlan_info = rtnl_dereference(dev->vlan_info);
if (!vlan_info)
@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
struct net_device *tmp;
LIST_HEAD(close_list);
-if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
-vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
/* Put all VLANs for this dev in the down state too. */
vlan_group_for_each_dev(grp, i, vlandev) {
flgs = vlandev->flags;


@@ -4864,6 +4864,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
if (!xnet)
return;
ipvs_reset(skb);
skb_orphan(skb);
skb->mark = 0;
}


@@ -133,6 +133,8 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
if (err)
return err;
}
return 0;
}
for_each_set_bit(port, group, ds->num_ports)
@@ -180,6 +182,8 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
if (err)
return err;
}
return 0;
}
for_each_set_bit(port, members, ds->num_ports)


@@ -115,7 +115,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
-#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
@@ -2615,7 +2615,6 @@ void tcp_simple_retransmit(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int mss = tcp_current_mss(sk);
-u32 prior_lost = tp->lost_out;
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
@@ -2632,7 +2631,7 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_clear_retrans_hints_partial(tp);
-if (prior_lost == tp->lost_out)
if (!tp->lost_out)
return;
if (tcp_is_reno(tp))


@@ -149,11 +149,19 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
* is freed by GSO engine
*/
if (copy_destructor) {
int delta;
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
sum_truesize += skb->truesize;
-refcount_add(sum_truesize - gso_skb->truesize,
-&skb->sk->sk_wmem_alloc);
delta = sum_truesize - gso_skb->truesize;
/* In some pathological cases, delta can be negative.
* We need to either use refcount_add() or refcount_sub_and_test()
*/
if (likely(delta >= 0))
refcount_add(delta, &skb->sk->sk_wmem_alloc);
else
WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
}
delta = htonl(oldlen + (skb_tail_pointer(skb) -
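The new comment explains why the truesize accounting is now split on the sign of delta: the refcount helpers take an unsigned amount, so a negative adjustment has to go through the subtract-and-test path rather than refcount_add(). A self-contained user-space analogue of that pattern (illustrative only; a plain C11 atomic stands in for refcount_t and sk_wmem_alloc):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Apply a signed delta to an unsigned counter that only offers
	 * add and sub operations.
	 */
	static void account_delta(atomic_uint *counter, long delta)
	{
		if (delta >= 0)
			atomic_fetch_add(counter, (unsigned int)delta);
		else
			atomic_fetch_sub(counter, (unsigned int)(-delta));
	}

	int main(void)
	{
		atomic_uint wmem = 1000;

		account_delta(&wmem, 64);	/* common case: delta >= 0 */
		account_delta(&wmem, -16);	/* the pathological case from the comment */
		printf("%u\n", atomic_load(&wmem));	/* prints 1048 */
		return 0;
	}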


@@ -123,6 +123,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
struct iphdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -178,24 +179,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-tunnel = l2tp_tunnel_find(net, tunnel_id);
-if (tunnel) {
-sk = tunnel->sock;
-sock_hold(sk);
-} else {
-struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
iph = (struct iphdr *)skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
-sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
-inet_iif(skb), tunnel_id);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
tunnel_id);
if (!sk) {
-read_unlock_bh(&l2tp_ip_lock);
-goto discard;
-}
-sock_hold(sk);
read_unlock_bh(&l2tp_ip_lock);
goto discard;
}
sock_hold(sk);
read_unlock_bh(&l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;


@@ -136,6 +136,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
struct ipv6hdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -192,24 +193,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-tunnel = l2tp_tunnel_find(net, tunnel_id);
-if (tunnel) {
-sk = tunnel->sock;
-sock_hold(sk);
-} else {
-struct ipv6hdr *iph = ipv6_hdr(skb);
iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
inet6_iif(skb), tunnel_id);
if (!sk) {
-read_unlock_bh(&l2tp_ip6_lock);
-goto discard;
-}
-sock_hold(sk);
read_unlock_bh(&l2tp_ip6_lock);
goto discard;
}
sock_hold(sk);
read_unlock_bh(&l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;


@@ -1085,7 +1085,7 @@ static int __init qrtr_proto_init(void)
return 0;
}
-module_init(qrtr_proto_init);
postcore_initcall(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{


@@ -410,14 +410,14 @@ void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
break;
}
-/* XXX when can this fail? */
-ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
-rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv,
recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
(long) ib_sg_dma_address(
ic->i_cm_id->device,
-&recv->r_frag->f_sg),
-ret);
&recv->r_frag->f_sg));
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI4 returned %d, disconnecting and "


@@ -78,7 +78,6 @@ static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
spin_lock_bh(&idrinfo->lock);
idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
-put_net(idrinfo->net);
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
}
@@ -337,7 +336,6 @@ err3:
p->idrinfo = idrinfo;
p->ops = ops;
INIT_LIST_HEAD(&p->list);
-get_net(idrinfo->net);
*a = p;
return 0;
}


@@ -398,7 +398,7 @@ static __net_init int bpf_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, bpf_net_id);
-return tc_action_net_init(tn, &act_bpf_ops, net);
return tc_action_net_init(tn, &act_bpf_ops);
}
static void __net_exit bpf_exit_net(struct net *net)


@@ -206,7 +206,7 @@ static __net_init int connmark_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, connmark_net_id);
-return tc_action_net_init(tn, &act_connmark_ops, net);
return tc_action_net_init(tn, &act_connmark_ops);
}
static void __net_exit connmark_exit_net(struct net *net)


@@ -626,7 +626,7 @@ static __net_init int csum_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, csum_net_id);
-return tc_action_net_init(tn, &act_csum_ops, net);
return tc_action_net_init(tn, &act_csum_ops);
}
static void __net_exit csum_exit_net(struct net *net)


@@ -232,7 +232,7 @@ static __net_init int gact_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, gact_net_id);
-return tc_action_net_init(tn, &act_gact_ops, net);
return tc_action_net_init(tn, &act_gact_ops);
}
static void __net_exit gact_exit_net(struct net *net)


@@ -818,7 +818,7 @@ static __net_init int ife_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, ife_net_id);
-return tc_action_net_init(tn, &act_ife_ops, net);
return tc_action_net_init(tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct net *net)


@@ -334,7 +334,7 @@ static __net_init int ipt_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, ipt_net_id);
-return tc_action_net_init(tn, &act_ipt_ops, net);
return tc_action_net_init(tn, &act_ipt_ops);
}
static void __net_exit ipt_exit_net(struct net *net)
@@ -384,7 +384,7 @@ static __net_init int xt_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, xt_net_id);
-return tc_action_net_init(tn, &act_xt_ops, net);
return tc_action_net_init(tn, &act_xt_ops);
}
static void __net_exit xt_exit_net(struct net *net)


@@ -343,7 +343,7 @@ static __net_init int mirred_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, mirred_net_id);
-return tc_action_net_init(tn, &act_mirred_ops, net);
return tc_action_net_init(tn, &act_mirred_ops);
}
static void __net_exit mirred_exit_net(struct net *net)


@@ -307,7 +307,7 @@ static __net_init int nat_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, nat_net_id);
-return tc_action_net_init(tn, &act_nat_ops, net);
return tc_action_net_init(tn, &act_nat_ops);
}
static void __net_exit nat_exit_net(struct net *net)


@@ -450,7 +450,7 @@ static __net_init int pedit_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, pedit_net_id);
-return tc_action_net_init(tn, &act_pedit_ops, net);
return tc_action_net_init(tn, &act_pedit_ops);
}
static void __net_exit pedit_exit_net(struct net *net)


@@ -331,7 +331,7 @@ static __net_init int police_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, police_net_id);
-return tc_action_net_init(tn, &act_police_ops, net);
return tc_action_net_init(tn, &act_police_ops);
}
static void __net_exit police_exit_net(struct net *net)


@@ -240,7 +240,7 @@ static __net_init int sample_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, sample_net_id);
-return tc_action_net_init(tn, &act_sample_ops, net);
return tc_action_net_init(tn, &act_sample_ops);
}
static void __net_exit sample_exit_net(struct net *net)


@@ -201,7 +201,7 @@ static __net_init int simp_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, simp_net_id);
-return tc_action_net_init(tn, &act_simp_ops, net);
return tc_action_net_init(tn, &act_simp_ops);
}
static void __net_exit simp_exit_net(struct net *net)


@@ -238,7 +238,7 @@ static __net_init int skbedit_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-return tc_action_net_init(tn, &act_skbedit_ops, net);
return tc_action_net_init(tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct net *net)


@@ -263,7 +263,7 @@ static __net_init int skbmod_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-return tc_action_net_init(tn, &act_skbmod_ops, net);
return tc_action_net_init(tn, &act_skbmod_ops);
}
static void __net_exit skbmod_exit_net(struct net *net)


@@ -322,7 +322,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-return tc_action_net_init(tn, &act_tunnel_key_ops, net);
return tc_action_net_init(tn, &act_tunnel_key_ops);
}
static void __net_exit tunnel_key_exit_net(struct net *net)


@@ -269,7 +269,7 @@ static __net_init int vlan_init_net(struct net *net)
{
struct tc_action_net *tn = net_generic(net, vlan_net_id);
-return tc_action_net_init(tn, &act_vlan_ops, net);
return tc_action_net_init(tn, &act_vlan_ops);
}
static void __net_exit vlan_exit_net(struct net *net)


@@ -927,6 +927,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
exts->actions[i++] = act;
exts->nr_actions = i;
}
exts->net = net;
}
#else
if ((exts->action && tb[exts->action]) ||


@@ -85,16 +85,21 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
static void __basic_delete_filter(struct basic_filter *f)
{
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
kfree(f);
}
static void basic_delete_filter_work(struct work_struct *work)
{
struct basic_filter *f = container_of(work, struct basic_filter, work);
rtnl_lock();
-tcf_exts_destroy(&f->exts);
-tcf_em_tree_destroy(&f->ematches);
__basic_delete_filter(f);
rtnl_unlock();
-kfree(f);
}
static void basic_delete_filter(struct rcu_head *head)
@@ -113,7 +118,10 @@ static void basic_destroy(struct tcf_proto *tp)
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
-call_rcu(&f->rcu, basic_delete_filter);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, basic_delete_filter);
else
__basic_delete_filter(f);
}
kfree_rcu(head, rcu);
}
@@ -125,6 +133,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last)
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, basic_delete_filter);
*last = list_empty(&head->flist);
return 0;
@@ -219,6 +228,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (fold) {
list_replace_rcu(&fold->link, &fnew->link);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, basic_delete_filter);
} else {
list_add_rcu(&fnew->link, &head->flist);


@@ -249,6 +249,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
tcf_exts_destroy(&prog->exts);
tcf_exts_put_net(&prog->exts);
if (cls_bpf_is_ebpf(prog))
bpf_prog_put(prog->filter);
@@ -282,7 +283,10 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
-call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
if (tcf_exts_get_net(&prog->exts))
call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
else
__cls_bpf_delete_prog(prog);
}
static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
@@ -516,6 +520,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
tcf_exts_get_net(&oldprog->exts);
call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
} else {
list_add_rcu(&prog->link, &head->plist);


@@ -60,15 +60,21 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
tcf_exts_destroy(&head->exts);
tcf_em_tree_destroy(&head->ematches);
tcf_exts_put_net(&head->exts);
kfree(head);
}
static void cls_cgroup_destroy_work(struct work_struct *work)
{
struct cls_cgroup_head *head = container_of(work,
struct cls_cgroup_head,
work);
rtnl_lock();
-tcf_exts_destroy(&head->exts);
-tcf_em_tree_destroy(&head->ematches);
-kfree(head);
__cls_cgroup_destroy(head);
rtnl_unlock();
}
@@ -124,8 +130,10 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
goto errout;
rcu_assign_pointer(tp->root, new);
-if (head)
if (head) {
tcf_exts_get_net(&head->exts);
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
}
return 0;
errout:
tcf_exts_destroy(&new->exts);
@@ -138,8 +146,12 @@ static void cls_cgroup_destroy(struct tcf_proto *tp)
struct cls_cgroup_head *head = rtnl_dereference(tp->root);
/* Head can still be NULL due to cls_cgroup_init(). */
-if (head)
if (head) {
-call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
if (tcf_exts_get_net(&head->exts))
call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
else
__cls_cgroup_destroy(head);
}
}
static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last)


@@ -372,15 +372,21 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
static void __flow_destroy_filter(struct flow_filter *f)
{
del_timer_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
tcf_exts_put_net(&f->exts);
kfree(f);
}
static void flow_destroy_filter_work(struct work_struct *work)
{
struct flow_filter *f = container_of(work, struct flow_filter, work);
rtnl_lock();
-del_timer_sync(&f->perturb_timer);
-tcf_exts_destroy(&f->exts);
-tcf_em_tree_destroy(&f->ematches);
-kfree(f);
__flow_destroy_filter(f);
rtnl_unlock();
}
@@ -552,8 +558,10 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
*arg = fnew;
-if (fold)
if (fold) {
tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, flow_destroy_filter);
}
return 0;
err2:
@@ -570,6 +578,7 @@ static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
struct flow_filter *f = arg;
list_del_rcu(&f->list);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, flow_destroy_filter);
*last = list_empty(&head->filters);
return 0;
@@ -594,7 +603,10 @@ static void flow_destroy(struct tcf_proto *tp)
list_for_each_entry_safe(f, next, &head->filters, list) {
list_del_rcu(&f->list);
-call_rcu(&f->rcu, flow_destroy_filter);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, flow_destroy_filter);
else
__flow_destroy_filter(f);
}
kfree_rcu(head, rcu);
}


@@ -218,13 +218,19 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
static void __fl_destroy_filter(struct cls_fl_filter *f)
{
tcf_exts_destroy(&f->exts);
tcf_exts_put_net(&f->exts);
kfree(f);
}
static void fl_destroy_filter_work(struct work_struct *work)
{
struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
rtnl_lock();
-tcf_exts_destroy(&f->exts);
-kfree(f);
__fl_destroy_filter(f);
rtnl_unlock();
}
@@ -318,7 +324,10 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f)
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f);
tcf_unbind_filter(tp, &f->res);
-call_rcu(&f->rcu, fl_destroy_filter);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, fl_destroy_filter);
else
__fl_destroy_filter(f);
}
static void fl_destroy_sleepable(struct work_struct *work)
@@ -988,6 +997,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
call_rcu(&fold->rcu, fl_destroy_filter);
} else {
list_add_tail_rcu(&fnew->list, &head->filters);


@@ -122,13 +122,19 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
static void __fw_delete_filter(struct fw_filter *f)
{
tcf_exts_destroy(&f->exts);
tcf_exts_put_net(&f->exts);
kfree(f);
}
static void fw_delete_filter_work(struct work_struct *work)
{
struct fw_filter *f = container_of(work, struct fw_filter, work);
rtnl_lock();
-tcf_exts_destroy(&f->exts);
-kfree(f);
__fw_delete_filter(f);
rtnl_unlock();
}
@@ -154,7 +160,10 @@ static void fw_destroy(struct tcf_proto *tp)
RCU_INIT_POINTER(head->ht[h],
rtnl_dereference(f->next));
tcf_unbind_filter(tp, &f->res);
-call_rcu(&f->rcu, fw_delete_filter);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, fw_delete_filter);
else
__fw_delete_filter(f);
}
}
kfree_rcu(head, rcu);
@@ -179,6 +188,7 @@ static int fw_delete(struct tcf_proto *tp, void *arg, bool *last)
if (pfp == f) {
RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
tcf_unbind_filter(tp, &f->res);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, fw_delete_filter);
ret = 0;
break;
@@ -299,6 +309,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
rcu_assign_pointer(*fp, fnew);
tcf_unbind_filter(tp, &f->res);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, fw_delete_filter);
*arg = fnew;


@@ -44,13 +44,19 @@ static int mall_init(struct tcf_proto *tp)
return 0;
}
static void __mall_destroy(struct cls_mall_head *head)
{
tcf_exts_destroy(&head->exts);
tcf_exts_put_net(&head->exts);
kfree(head);
}
static void mall_destroy_work(struct work_struct *work)
{
struct cls_mall_head *head = container_of(work, struct cls_mall_head,
work);
rtnl_lock();
-tcf_exts_destroy(&head->exts);
-kfree(head);
__mall_destroy(head);
rtnl_unlock();
}
@@ -109,7 +115,10 @@ static void mall_destroy(struct tcf_proto *tp)
if (tc_should_offload(dev, head->flags))
mall_destroy_hw_filter(tp, head, (unsigned long) head);
-call_rcu(&head->rcu, mall_destroy_rcu);
if (tcf_exts_get_net(&head->exts))
call_rcu(&head->rcu, mall_destroy_rcu);
else
__mall_destroy(head);
}
static void *mall_get(struct tcf_proto *tp, u32 handle)

Some files were not shown because too many files have changed in this diff.