updating to mainline 4.14.5

Jake Day 2017-12-10 11:19:27 -05:00
parent e4a6cc5002
commit 3ed1e9afdb
90 changed files with 575 additions and 249 deletions

config
View file

@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86_64 4.14.4-jakeday Kernel Configuration
+# Linux/x86_64 4.14.5-jakeday Kernel Configuration
 #
 CONFIG_64BIT=y
 CONFIG_X86_64=y
@@ -347,6 +347,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
 CONFIG_STRICT_KERNEL_RWX=y
 CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
 CONFIG_STRICT_MODULE_RWX=y
+CONFIG_ARCH_HAS_REFCOUNT=y
 # CONFIG_REFCOUNT_FULL is not set
 #

View file

@@ -20,16 +20,16 @@ Required Properties:
 		(CMT1 on sh73a0 and r8a7740)
 		This is a fallback for the above renesas,cmt-48-* entries.
-  - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4.
-  - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4.
-  - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790.
-  - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790.
-  - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791.
-  - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791.
-  - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793.
-  - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793.
-  - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794.
-  - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794.
+  - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4.
+  - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4.
+  - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
+  - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790.
+  - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791.
+  - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791.
+  - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793.
+  - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793.
+  - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794.
+  - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794.
   - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2.
   - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2.
@@ -46,7 +46,7 @@ Required Properties:
 Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes
 	cmt0: timer@ffca0000 {
-		compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0";
+		compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0";
 		reg = <0 0xffca0000 0 0x1004>;
 		interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 142 IRQ_TYPE_LEVEL_HIGH>;
@@ -55,7 +55,7 @@ Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes
 	};
 	cmt1: timer@e6130000 {
-		compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1";
+		compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1";
 		reg = <0 0xe6130000 0 0x1004>;
 		interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 121 IRQ_TYPE_LEVEL_HIGH>,

View file

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 4
+SUBLEVEL = 5
 EXTRAVERSION =
 NAME = Petit Gorille

View file

@@ -91,7 +91,7 @@ config STATIC_KEYS_SELFTEST
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
-	depends on !PREEMPT
+	select TASKS_RCU if PREEMPT
 config KPROBES_ON_FTRACE
 	def_bool y

View file

@@ -170,7 +170,7 @@ void __init cf_bootmem_alloc(void)
 	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
 	high_memory = (void *)_ramend;
-	m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
 	module_fixup(NULL, __start_fixup, __stop_fixup);
 	/* setup bootmem data */

View file

@@ -16,7 +16,6 @@ generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += segment.h
-generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += user.h

View file

@ -0,0 +1,22 @@
/*
* Copyright (C) 2017 MIPS Tech, LLC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __ASM__SERIAL_H
#define __ASM__SERIAL_H
#ifdef CONFIG_MIPS_GENERIC
/*
* Generic kernels cannot know a correct value for all platforms at
* compile time. Set it to 0 to prevent 8250_early using it
*/
#define BASE_BAUD 0
#else
#include <asm-generic/serial.h>
#endif
#endif /* __ASM__SERIAL_H */

View file

@@ -65,6 +65,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 	/* Disable irq for emulating a breakpoint and avoiding preempt */
 	local_irq_save(flags);
 	hard_irq_disable();
+	preempt_disable();
 	p = get_kprobe((kprobe_opcode_t *)nip);
 	if (unlikely(!p) || kprobe_disabled(p))
@@ -86,12 +87,18 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		if (!p->pre_handler || !p->pre_handler(p, regs))
 			__skip_singlestep(p, regs, kcb, orig_nip);
-		/*
-		 * If pre_handler returns !0, it sets regs->nip and
-		 * resets current kprobe.
-		 */
+		else {
+			/*
+			 * If pre_handler returns !0, it sets regs->nip and
+			 * resets current kprobe. In this case, we still need
+			 * to restore irq, but not preemption.
+			 */
+			local_irq_restore(flags);
+			return;
+		}
 	}
 end:
+	preempt_enable_no_resched();
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);

View file

@@ -115,7 +115,6 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
 			       struct pt_regs *regs)
 {
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long flags;
 	/* This is possible if op is under delayed unoptimizing */
@@ -124,13 +123,14 @@ static void optimized_callback(struct optimized_kprobe *op,
 	local_irq_save(flags);
 	hard_irq_disable();
+	preempt_disable();
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
 	} else {
 		__this_cpu_write(current_kprobe, &op->kp);
 		regs->nip = (unsigned long)op->kp.addr;
-		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
 		__this_cpu_write(current_kprobe, NULL);
 	}
@@ -140,6 +140,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 	 * local_irq_restore() will re-enable interrupts,
 	 * if they were hard disabled.
 	 */
+	preempt_enable_no_resched();
 	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);

View file

@@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
 int zpci_load(u64 *data, u64 req, u64 offset);
 int zpci_store(u64 data, u64 req, u64 offset);
 int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 #endif

View file

@@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
 		load_runtime_instr_cb(&runtime_instr_empty_cb);
 }
-void exit_thread_runtime_instr(void);
+struct task_struct;
+void runtime_instr_release(struct task_struct *tsk);
 #endif /* _RUNTIME_INSTR_H */

View file

@@ -49,10 +49,8 @@ extern void kernel_thread_starter(void);
  */
 void exit_thread(struct task_struct *tsk)
 {
-	if (tsk == current) {
-		exit_thread_runtime_instr();
+	if (tsk == current)
 		exit_thread_gs();
-	}
 }
 void flush_thread(void)
@@ -65,6 +63,7 @@ void release_thread(struct task_struct *dead_task)
 void arch_release_task_struct(struct task_struct *tsk)
 {
+	runtime_instr_release(tsk);
 }
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)

View file

@@ -1172,26 +1172,37 @@ static int s390_gs_cb_set(struct task_struct *target,
 			  unsigned int pos, unsigned int count,
 			  const void *kbuf, const void __user *ubuf)
 {
-	struct gs_cb *data = target->thread.gs_cb;
+	struct gs_cb gs_cb = { }, *data = NULL;
 	int rc;
 	if (!MACHINE_HAS_GS)
 		return -ENODEV;
-	if (!data) {
+	if (!target->thread.gs_cb) {
 		data = kzalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
 			return -ENOMEM;
-		data->gsd = 25;
-		target->thread.gs_cb = data;
-		if (target == current)
-			__ctl_set_bit(2, 4);
-	} else if (target == current) {
-		save_gs_cb(data);
 	}
+	if (!target->thread.gs_cb)
+		gs_cb.gsd = 25;
+	else if (target == current)
+		save_gs_cb(&gs_cb);
+	else
+		gs_cb = *target->thread.gs_cb;
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				data, 0, sizeof(struct gs_cb));
-	if (target == current)
-		restore_gs_cb(data);
+				&gs_cb, 0, sizeof(gs_cb));
+	if (rc) {
+		kfree(data);
+		return -EFAULT;
+	}
+	preempt_disable();
+	if (!target->thread.gs_cb)
+		target->thread.gs_cb = data;
+	*target->thread.gs_cb = gs_cb;
+	if (target == current) {
+		__ctl_set_bit(2, 4);
+		restore_gs_cb(target->thread.gs_cb);
+	}
+	preempt_enable();
 	return rc;
 }

View file

@ -21,11 +21,24 @@
/* empty control block to disable RI by loading it */ /* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb; struct runtime_instr_cb runtime_instr_empty_cb;
void runtime_instr_release(struct task_struct *tsk)
{
kfree(tsk->thread.ri_cb);
}
static void disable_runtime_instr(void) static void disable_runtime_instr(void)
{ {
struct pt_regs *regs = task_pt_regs(current); struct task_struct *task = current;
struct pt_regs *regs;
if (!task->thread.ri_cb)
return;
regs = task_pt_regs(task);
preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb); load_runtime_instr_cb(&runtime_instr_empty_cb);
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
preempt_enable();
/* /*
* Make sure the RI bit is deleted from the PSW. If the user did not * Make sure the RI bit is deleted from the PSW. If the user did not
@ -46,19 +59,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->valid = 1; cb->valid = 1;
} }
void exit_thread_runtime_instr(void)
{
struct task_struct *task = current;
preempt_disable();
if (!task->thread.ri_cb)
return;
disable_runtime_instr();
kfree(task->thread.ri_cb);
task->thread.ri_cb = NULL;
preempt_enable();
}
SYSCALL_DEFINE1(s390_runtime_instr, int, command) SYSCALL_DEFINE1(s390_runtime_instr, int, command)
{ {
struct runtime_instr_cb *cb; struct runtime_instr_cb *cb;
@ -67,7 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) { if (command == S390_RUNTIME_INSTR_STOP) {
exit_thread_runtime_instr(); disable_runtime_instr();
return 0; return 0;
} }

View file

@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
/* End of second scan with interrupts on. */ /* End of second scan with interrupts on. */
break; break;
/* First scan complete, reenable interrupts. */ /* First scan complete, reenable interrupts. */
zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
break;
si = 0; si = 0;
continue; continue;
} }
@ -956,7 +957,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe) if (!s390_pci_probe)
return 0; return 0;
if (!test_facility(69) || !test_facility(71) || !test_facility(72)) if (!test_facility(69) || !test_facility(71))
return 0; return 0;
rc = zpci_debug_init(); rc = zpci_debug_init();

View file

@ -7,6 +7,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/facility.h>
#include <asm/pci_insn.h> #include <asm/pci_insn.h>
#include <asm/pci_debug.h> #include <asm/pci_debug.h>
#include <asm/processor.h> #include <asm/processor.h>
@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
} }
/* Set Interruption Controls */ /* Set Interruption Controls */
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{ {
if (!test_facility(72))
return -EIO;
asm volatile ( asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
return 0;
} }
/* PCI Load */ /* PCI Load */

View file

@@ -56,7 +56,7 @@ config X86
 	select ARCH_HAS_KCOV			if X86_64
 	select ARCH_HAS_PMEM_API		if X86_64
 	# Causing hangs/crashes, see the commit that added this change for details.
-	select ARCH_HAS_REFCOUNT		if BROKEN
+	select ARCH_HAS_REFCOUNT
 	select ARCH_HAS_UACCESS_FLUSHCACHE	if X86_64
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SG_CHAIN

View file

@@ -15,7 +15,7 @@
  * back to the regular execution flow in .text.
  */
 #define _REFCOUNT_EXCEPTION \
-	".pushsection .text.unlikely\n" \
+	".pushsection .text..refcount\n" \
 	"111:\tlea %[counter], %%" _ASM_CX "\n" \
 	"112:\t" ASM_UD0 "\n" \
 	ASM_UNREACHABLE \

View file

@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
 asmlinkage long sys_iopl(unsigned int);
 /* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
 /* kernel/signal.c */
 asmlinkage long sys_rt_sigreturn(void);

View file

@@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type)
 	r->num_closid = r_l3->num_closid / 2;
 	r->cache.cbm_len = r_l3->cache.cbm_len;
 	r->default_ctrl = r_l3->default_ctrl;
+	r->cache.shareable_bits = r_l3->cache.shareable_bits;
 	r->data_width = (r->cache.cbm_len + 3) / 4;
 	r->alloc_capable = true;
 	/*

View file

@@ -1081,6 +1081,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 	struct dentry *dentry;
 	int ret;
+	cpus_read_lock();
 	mutex_lock(&rdtgroup_mutex);
 	/*
 	 * resctrl file system can only be mounted once.
@@ -1130,12 +1131,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
 		goto out_mondata;
 	if (rdt_alloc_capable)
-		static_branch_enable(&rdt_alloc_enable_key);
+		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
 	if (rdt_mon_capable)
-		static_branch_enable(&rdt_mon_enable_key);
+		static_branch_enable_cpuslocked(&rdt_mon_enable_key);
 	if (rdt_alloc_capable || rdt_mon_capable)
-		static_branch_enable(&rdt_enable_key);
+		static_branch_enable_cpuslocked(&rdt_enable_key);
 	if (is_mbm_enabled()) {
 		r = &rdt_resources_all[RDT_RESOURCE_L3];
@@ -1157,6 +1158,7 @@ out_cdp:
 	cdp_disable();
 out:
 	mutex_unlock(&rdtgroup_mutex);
+	cpus_read_unlock();
 	return dentry;
 }

View file

@ -26,7 +26,7 @@
#include "common.h" #include "common.h"
static nokprobe_inline static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, unsigned long orig_ip) struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{ {
/* /*
@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, NULL); __this_cpu_write(current_kprobe, NULL);
if (orig_ip) if (orig_ip)
regs->ip = orig_ip; regs->ip = orig_ip;
return 1;
} }
int skip_singlestep(struct kprobe *p, struct pt_regs *regs, int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb) struct kprobe_ctlblk *kcb)
{ {
if (kprobe_ftrace(p)) if (kprobe_ftrace(p)) {
return __skip_singlestep(p, regs, kcb, 0); __skip_singlestep(p, regs, kcb, 0);
else preempt_enable_no_resched();
return 0; return 1;
}
return 0;
} }
NOKPROBE_SYMBOL(skip_singlestep); NOKPROBE_SYMBOL(skip_singlestep);
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs) struct ftrace_ops *ops, struct pt_regs *regs)
{ {
@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t); regs->ip = ip + sizeof(kprobe_opcode_t);
/* To emulate trap based kprobes, preempt_disable here */
preempt_disable();
__this_cpu_write(current_kprobe, p); __this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE; kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip); __skip_singlestep(p, regs, kcb, orig_ip);
preempt_enable_no_resched();
}
/* /*
* If pre_handler returns !0, it sets regs->ip and * If pre_handler returns !0, it sets regs->ip and
* resets current kprobe. * resets current kprobe, and keep preempt count +1.
*/ */
} }
end: end:

View file

@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
@@ -295,8 +296,8 @@
 	return error;
 }
-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
-			      unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+		unsigned long , bytecount)
 {
 	int ret = -ENOSYS;
@@ -314,5 +315,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
 		ret = write_ldt(ptr, bytecount, 0);
 		break;
 	}
-	return ret;
+	/*
+	 * The SYSCALL_DEFINE() macros give us an 'unsigned long'
+	 * return type, but the ABI for sys_modify_ldt() expects
+	 * 'int'. This cast gives us an int-sized value in %rax
+	 * for the return code. The 'unsigned' is necessary so
+	 * the compiler does not try to sign-extend the negative
+	 * return codes into the high half of the register when
+	 * taking the value from int->long.
+	 */
+	return (unsigned int)ret;
 }
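The cast in the new return statement is the heart of this hunk: going through 'unsigned int' keeps a negative errno in the low 32 bits of the register instead of sign-extending it across all 64 bits. A minimal userspace sketch of the difference (the value -38 is just an illustrative errno-style number, not taken from the commit):

#include <stdio.h>

/* Sketch only: how an int widens to long with and without the cast. */
static long widen_signed(int v)   { return v; }               /* sign-extends */
static long widen_unsigned(int v) { return (unsigned int)v; } /* zero-extends */

int main(void)
{
	int err = -38;	/* arbitrary negative errno-style value */

	printf("sign-extended: %lx\n", widen_signed(err));	/* ffffffffffffffda */
	printf("zero-extended: %lx\n", widen_unsigned(err));	/* ffffffda */
	return 0;
}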

View file

@ -67,17 +67,22 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
* wrapped around) will be set. Additionally, seeing the refcount * wrapped around) will be set. Additionally, seeing the refcount
* reach 0 will set ZF (Zero Flag: result was zero). In each of * reach 0 will set ZF (Zero Flag: result was zero). In each of
* these cases we want a report, since it's a boundary condition. * these cases we want a report, since it's a boundary condition.
* * The SF case is not reported since it indicates post-boundary
* manipulations below zero or above INT_MAX. And if none of the
* flags are set, something has gone very wrong, so report it.
*/ */
if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) { if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
bool zero = regs->flags & X86_EFLAGS_ZF; bool zero = regs->flags & X86_EFLAGS_ZF;
refcount_error_report(regs, zero ? "hit zero" : "overflow"); refcount_error_report(regs, zero ? "hit zero" : "overflow");
} else if ((regs->flags & X86_EFLAGS_SF) == 0) {
/* Report if none of OF, ZF, nor SF are set. */
refcount_error_report(regs, "unexpected saturation");
} }
return true; return true;
} }
EXPORT_SYMBOL_GPL(ex_handler_refcount); EXPORT_SYMBOL(ex_handler_refcount);
/* /*
* Handler for when we fail to restore a task's FPU state. We should never get * Handler for when we fail to restore a task's FPU state. We should never get

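Summarizing the new reporting policy above: OF or ZF set means a boundary was hit and gets reported, SF alone means a post-boundary manipulation and stays silent, and none of the three flags set is treated as unexpected saturation. A small standalone sketch of that decision table (the three booleans stand in for the x86 OF/ZF/SF bits; this is not the kernel handler itself):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the reporting decision in ex_handler_refcount() above. */
static const char *refcount_report(bool of, bool zf, bool sf)
{
	if (of || zf)
		return zf ? "hit zero" : "overflow";
	if (!sf)
		return "unexpected saturation";
	return NULL;	/* post-boundary manipulation: not reported */
}

int main(void)
{
	printf("%s\n", refcount_report(true, false, false));	/* overflow */
	printf("%s\n", refcount_report(false, true, false));	/* hit zero */
	printf("%s\n", refcount_report(false, false, false));	/* unexpected saturation */
	return 0;
}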
View file

@ -6,6 +6,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <os.h> #include <os.h>
@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
mm->arch.ldt.entry_count = 0; mm->arch.ldt.entry_count = 0;
} }
int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
unsigned long , bytecount)
{ {
return do_modify_ldt_skas(func, ptr, bytecount); /* See non-um modify_ldt() for why we do this cast */
return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
} }

View file

@ -1947,6 +1947,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
} }
} }
/**
* binder_cleanup_transaction() - cleans up undelivered transaction
* @t: transaction that needs to be cleaned up
* @reason: reason the transaction wasn't delivered
* @error_code: error to return to caller (if synchronous call)
*/
static void binder_cleanup_transaction(struct binder_transaction *t,
const char *reason,
uint32_t error_code)
{
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, error_code);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d, %s\n",
t->debug_id, reason);
binder_free_transaction(t);
}
}
/** /**
* binder_validate_object() - checks for a valid metadata object in a buffer. * binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing. * @buffer: binder_buffer that we're parsing.
@ -4015,12 +4035,20 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr)) { if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from) if (t_from)
binder_thread_dec_tmpref(t_from); binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "put_user failed",
BR_FAILED_REPLY);
return -EFAULT; return -EFAULT;
} }
ptr += sizeof(uint32_t); ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) { if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from) if (t_from)
binder_thread_dec_tmpref(t_from); binder_thread_dec_tmpref(t_from);
binder_cleanup_transaction(t, "copy_to_user failed",
BR_FAILED_REPLY);
return -EFAULT; return -EFAULT;
} }
ptr += sizeof(tr); ptr += sizeof(tr);
@ -4090,15 +4118,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t; struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work); t = container_of(w, struct binder_transaction, work);
if (t->buffer->target_node &&
!(t->flags & TF_ONE_WAY)) { binder_cleanup_transaction(t, "process died.",
binder_send_failed_reply(t, BR_DEAD_REPLY); BR_DEAD_REPLY);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
binder_free_transaction(t);
}
} break; } break;
case BINDER_WORK_RETURN_ERROR: { case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of( struct binder_error *e = container_of(

View file

@ -921,7 +921,7 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd); genpd_unlock(genpd);
ret = pm_generic_prepare(dev); ret = pm_generic_prepare(dev);
if (ret) { if (ret < 0) {
genpd_lock(genpd); genpd_lock(genpd);
genpd->prepared_count--; genpd->prepared_count--;
@ -929,7 +929,8 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd); genpd_unlock(genpd);
} }
return ret; /* Never return 1, as genpd don't cope with the direct_complete path. */
return ret >= 0 ? 0 : ret;
} }
/** /**

View file

@ -1268,10 +1268,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
iounmap(cntctlbase); iounmap(cntctlbase);
if (!best_frame)
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer_mem->cntctlbase);
return best_frame; return best_frame;
} }
@ -1372,6 +1368,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
frame = arch_timer_mem_find_best_frame(timer_mem); frame = arch_timer_mem_find_best_frame(timer_mem);
if (!frame) { if (!frame) {
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer_mem->cntctlbase);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
@ -1420,7 +1418,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
static int __init arch_timer_mem_acpi_init(int platform_timer_count) static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{ {
struct arch_timer_mem *timers, *timer; struct arch_timer_mem *timers, *timer;
struct arch_timer_mem_frame *frame; struct arch_timer_mem_frame *frame, *best_frame = NULL;
int timer_count, i, ret = 0; int timer_count, i, ret = 0;
timers = kcalloc(platform_timer_count, sizeof(*timers), timers = kcalloc(platform_timer_count, sizeof(*timers),
@ -1432,14 +1430,6 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
if (ret || !timer_count) if (ret || !timer_count)
goto out; goto out;
for (i = 0; i < timer_count; i++) {
ret = arch_timer_mem_verify_cntfrq(&timers[i]);
if (ret) {
pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
goto out;
}
}
/* /*
* While unlikely, it's theoretically possible that none of the frames * While unlikely, it's theoretically possible that none of the frames
* in a timer expose the combination of feature we want. * in a timer expose the combination of feature we want.
@ -1448,12 +1438,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
timer = &timers[i]; timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer); frame = arch_timer_mem_find_best_frame(timer);
if (frame) if (!best_frame)
break; best_frame = frame;
ret = arch_timer_mem_verify_cntfrq(timer);
if (ret) {
pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
goto out;
}
if (!best_frame) /* implies !frame */
/*
* Only complain about missing suitable frames if we
* haven't already found one in a previous iteration.
*/
pr_err("Unable to find a suitable frame in timer @ %pa\n",
&timer->cntctlbase);
} }
if (frame) if (best_frame)
ret = arch_timer_mem_frame_register(frame); ret = arch_timer_mem_frame_register(best_frame);
out: out:
kfree(timers); kfree(timers);
return ret; return ret;

View file

@ -104,13 +104,13 @@ static int __init arm_idle_init(void)
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0) { if (ret <= 0) {
ret = ret ? : -ENODEV; ret = ret ? : -ENODEV;
goto init_fail; goto out_kfree_drv;
} }
ret = cpuidle_register_driver(drv); ret = cpuidle_register_driver(drv);
if (ret) { if (ret) {
pr_err("Failed to register cpuidle driver\n"); pr_err("Failed to register cpuidle driver\n");
goto init_fail; goto out_kfree_drv;
} }
/* /*
@ -128,14 +128,14 @@ static int __init arm_idle_init(void)
if (ret) { if (ret) {
pr_err("CPU %d failed to init idle CPU ops\n", cpu); pr_err("CPU %d failed to init idle CPU ops\n", cpu);
goto out_fail; goto out_unregister_drv;
} }
dev = kzalloc(sizeof(*dev), GFP_KERNEL); dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) { if (!dev) {
pr_err("Failed to allocate cpuidle device\n"); pr_err("Failed to allocate cpuidle device\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_fail; goto out_unregister_drv;
} }
dev->cpu = cpu; dev->cpu = cpu;
@ -143,21 +143,25 @@ static int __init arm_idle_init(void)
if (ret) { if (ret) {
pr_err("Failed to register cpuidle device for CPU %d\n", pr_err("Failed to register cpuidle device for CPU %d\n",
cpu); cpu);
kfree(dev); goto out_kfree_dev;
goto out_fail;
} }
} }
return 0; return 0;
init_fail:
out_kfree_dev:
kfree(dev);
out_unregister_drv:
cpuidle_unregister_driver(drv);
out_kfree_drv:
kfree(drv); kfree(drv);
out_fail: out_fail:
while (--cpu >= 0) { while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu); dev = per_cpu(cpuidle_devices, cpu);
drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev); cpuidle_unregister_device(dev);
kfree(dev);
drv = cpuidle_get_driver();
cpuidle_unregister_driver(drv); cpuidle_unregister_driver(drv);
kfree(dev);
kfree(drv); kfree(drv);
} }

View file

@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
static int sw_sync_debugfs_release(struct inode *inode, struct file *file) static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{ {
struct sync_timeline *obj = file->private_data; struct sync_timeline *obj = file->private_data;
struct sync_pt *pt, *next;
smp_wmb(); spin_lock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
dma_fence_set_error(&pt->base, -ENOENT);
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
sync_timeline_put(obj); sync_timeline_put(obj);
return 0; return 0;

View file

@ -2498,6 +2498,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev; pvt->pci_ta = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev; pvt->pci_ras = pdev;

View file

@ -697,7 +697,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr); struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev, int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr, struct amdgpu_queue_mgr *mgr,
int hw_ip, int instance, int ring, u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring); struct amdgpu_ring **out_ring);
/* /*

View file

@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
static int amdgpu_identity_map(struct amdgpu_device *adev, static int amdgpu_identity_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper, struct amdgpu_queue_mapper *mapper,
int ring, u32 ring,
struct amdgpu_ring **out_ring) struct amdgpu_ring **out_ring)
{ {
switch (mapper->hw_ip) { switch (mapper->hw_ip) {
@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev, static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper, struct amdgpu_queue_mapper *mapper,
int user_ring, u32 user_ring,
struct amdgpu_ring **out_ring) struct amdgpu_ring **out_ring)
{ {
int r, i, j; int r, i, j;
@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
*/ */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev, int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr, struct amdgpu_queue_mgr *mgr,
int hw_ip, int instance, int ring, u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring) struct amdgpu_ring **out_ring)
{ {
int r, ip_num_rings; int r, ip_num_rings;

View file

@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
return PTR_ERR(fsl_dev->state); return PTR_ERR(fsl_dev->state);
} }
clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk); clk_disable_unprepare(fsl_dev->clk);
return 0; return 0;
@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (fsl_dev->tcon) if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm); fsl_dcu_drm_init_planes(fsl_dev->drm);
enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock(); console_lock();
@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
console_unlock(); console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm); drm_kms_helper_poll_enable(fsl_dev->drm);
enable_irq(fsl_dev->irq);
return 0; return 0;
} }

View file

@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */ /* Function declarations */
void pmbus_clear_cache(struct i2c_client *client); void pmbus_clear_cache(struct i2c_client *client);
int pmbus_set_page(struct i2c_client *client, u8 page); int pmbus_set_page(struct i2c_client *client, int page);
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,

View file

@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client)
} }
EXPORT_SYMBOL_GPL(pmbus_clear_cache); EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, u8 page) int pmbus_set_page(struct i2c_client *client, int page)
{ {
struct pmbus_data *data = i2c_get_clientdata(client); struct pmbus_data *data = i2c_get_clientdata(client);
int rv = 0; int rv = 0;
int newpage; int newpage;
if (page != data->currpage) { if (page >= 0 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page) if (newpage != page)
@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{ {
int rv; int rv;
if (page >= 0) { rv = pmbus_set_page(client, page);
rv = pmbus_set_page(client, page); if (rv < 0)
if (rv < 0) return rv;
return rv;
}
return i2c_smbus_write_byte(client, value); return i2c_smbus_write_byte(client, value);
} }
@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return pmbus_write_byte(client, page, value); return pmbus_write_byte(client, page, value);
} }
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
u16 word)
{ {
int rv; int rv;
@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return pmbus_write_word_data(client, page, reg, word); return pmbus_write_word_data(client, page, reg, word);
} }
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{ {
int rv; int rv;
@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{ {
int rv; int rv;
if (page >= 0) { rv = pmbus_set_page(client, page);
rv = pmbus_set_page(client, page); if (rv < 0)
if (rv < 0) return rv;
return rv;
}
return i2c_smbus_read_byte_data(client, reg); return i2c_smbus_read_byte_data(client, reg);
} }

View file

@ -369,6 +369,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
usleep_range(conv_time, conv_time + 1); usleep_range(conv_time, conv_time + 1);
data->conv_invalid = false; data->conv_invalid = false;
} }
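The added line pads the computed conversion wait by 10% to cover the ADC's internal clock tolerance. The arithmetic in isolation, with an assumed data rate of 128 samples per second (not a value from the commit):

#include <stdio.h>

#define USEC_PER_SEC 1000000
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int data_rate = 128;	/* assumed samples per second */
	unsigned int conv_time = DIV_ROUND_UP(USEC_PER_SEC, data_rate);

	conv_time += conv_time / 10;	/* 10% guard band for clock inaccuracy */
	printf("%u us\n", conv_time);	/* 7813 + 781 = 8594 */
	return 0;
}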

View file

@ -285,6 +285,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache = devm_kzalloc(dev, child->ext_info_cache = devm_kzalloc(dev,
sizeof(*child->ext_info_cache) * sizeof(*child->ext_info_cache) *
num_ext_info, GFP_KERNEL); num_ext_info, GFP_KERNEL);
if (!child->ext_info_cache)
return -ENOMEM;
for (i = 0; i < num_ext_info; ++i) { for (i = 0; i < num_ext_info; ++i) {
child->ext_info_cache[i].size = -1; child->ext_info_cache[i].size = -1;
@ -309,6 +312,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1, child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
GFP_KERNEL); GFP_KERNEL);
if (!child->ext_info_cache[i].data)
return -ENOMEM;
child->ext_info_cache[i].data[ret] = 0; child->ext_info_cache[i].data[ret] = 0;
child->ext_info_cache[i].size = ret; child->ext_info_cache[i].size = ret;
} }

View file

@ -1251,6 +1251,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0); CORE_VENDOR_SPEC_CAPABILITIES0);
} }
/*
* Power on reset state may trigger power irq if previous status of
* PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
* interrupt in GIC, any pending power irq interrupt should be
* acknowledged. Otherwise power irq interrupt handler would be
* fired prematurely.
*/
sdhci_msm_voltage_switch(host);
/*
* Ensure that the above writes are propagated before interrupt enablement
* in GIC.
*/
mb();
/* Setup IRQ for handling power/voltage tasks with PMIC */ /* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) { if (msm_host->pwr_irq < 0) {
@ -1260,6 +1275,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable; goto clk_disable;
} }
/* Enable pwr irq interrupts */
writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT, sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host); dev_name(&pdev->dev), host);

View file

@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{ {
struct tmio_mmc_data *pdata = host->pdata; struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc; struct mmc_host *mmc = host->mmc;
int err;
mmc_regulator_get_supply(mmc); err = mmc_regulator_get_supply(mmc);
if (err)
return err;
/* use ocr_mask if no regulator */ /* use ocr_mask if no regulator */
if (!mmc->ocr_avail) if (!mmc->ocr_avail)

View file

@ -11,6 +11,7 @@
*/ */
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
int i; int i;
int phy_count; int phy_count;
struct phy **phy; struct phy **phy;
struct device_link **link;
void __iomem *base; void __iomem *base;
struct resource *res; struct resource *res;
struct dw_pcie *pci; struct dw_pcie *pci;
@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!phy) if (!phy)
return -ENOMEM; return -ENOMEM;
link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
if (!link)
return -ENOMEM;
for (i = 0; i < phy_count; i++) { for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i); snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name); phy[i] = devm_phy_get(dev, name);
if (IS_ERR(phy[i])) if (IS_ERR(phy[i]))
return PTR_ERR(phy[i]); return PTR_ERR(phy[i]);
link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
if (!link[i]) {
ret = -EINVAL;
goto err_link;
}
} }
dra7xx->base = base; dra7xx->base = base;
@ -732,6 +744,10 @@ err_get_sync:
pm_runtime_disable(dev); pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx); dra7xx_pcie_disable_phy(dra7xx);
err_link:
while (--i >= 0)
device_link_del(link[i]);
return ret; return ret;
} }

View file

@ -330,6 +330,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{ {
struct ccw1 *ccw = chain->ch_ccw + idx; struct ccw1 *ccw = chain->ch_ccw + idx;
if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
return;
if (!ccw->count) if (!ccw->count)
return; return;

View file

@ -553,7 +553,7 @@ err_put_master:
static int spi_engine_remove(struct platform_device *pdev) static int spi_engine_remove(struct platform_device *pdev)
{ {
struct spi_master *master = platform_get_drvdata(pdev); struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_master_get_devdata(master); struct spi_engine *spi_engine = spi_master_get_devdata(master);
int irq = platform_get_irq(pdev, 0); int irq = platform_get_irq(pdev, 0);
@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
free_irq(irq, master); free_irq(irq, master);
spi_master_put(master);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);

View file

@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break; break;
copy32 = copy_bswap32; copy32 = copy_bswap32;
} else if (bits <= 16) { } else if (bits <= 16) {
if (l & 1) if (l & 3)
break; break;
copy32 = copy_wswap32; copy32 = copy_wswap32;
} else { } else {

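The tightened test above matters because copy_wswap32() moves whole 32-bit words, so the transfer length must be a multiple of 4 rather than merely even. A tiny demonstration of the two mask checks (values are arbitrary):

#include <stdio.h>

int main(void)
{
	for (unsigned int len = 0; len < 9; len++)
		printf("len=%u  halfword-aligned=%d  word-aligned=%d\n",
		       len, !(len & 1), !(len & 3));
	return 0;
}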
View file

@ -1790,9 +1790,12 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
} }
in += sizeof(u32); in += sizeof(u32);
rc = ssi_hash_init(state, ctx); /* call init() to allocate bufs if the user hasn't */
if (rc) if (!state->digest_buff) {
goto out; rc = ssi_hash_init(state, ctx);
if (rc)
goto out;
}
dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr, dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL); ctx->inter_digestsize, DMA_BIDIRECTIONAL);

View file

@ -131,6 +131,8 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
u16 fd_offset = dpaa2_fd_get_offset(fd); u16 fd_offset = dpaa2_fd_get_offset(fd);
u32 fd_length = dpaa2_fd_get_len(fd); u32 fd_length = dpaa2_fd_get_len(fd);
ch->buf_count--;
skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (unlikely(!skb)) if (unlikely(!skb))
@ -139,8 +141,6 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
skb_reserve(skb, fd_offset); skb_reserve(skb, fd_offset);
skb_put(skb, fd_length); skb_put(skb, fd_length);
ch->buf_count--;
return skb; return skb;
} }
@ -178,8 +178,15 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* We build the skb around the first data buffer */ /* We build the skb around the first data buffer */
skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (unlikely(!skb)) if (unlikely(!skb)) {
return NULL; /* We still need to subtract the buffers used
* by this FD from our software counter
*/
while (!dpaa2_sg_is_final(&sgt[i]) &&
i < DPAA2_ETH_MAX_SG_ENTRIES)
i++;
break;
}
sg_offset = dpaa2_sg_get_offset(sge); sg_offset = dpaa2_sg_get_offset(sge);
skb_reserve(skb, sg_offset); skb_reserve(skb, sg_offset);

View file

@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
if (d) if (d)
return d; return d;
if (unlikely(cpu >= num_possible_cpus())) if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
return NULL; return NULL;
/* /*
@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
return NULL; return NULL;
/* check if CPU is out of range (-1 means any cpu) */ /* check if CPU is out of range (-1 means any cpu) */
if (desc->cpu >= num_possible_cpus()) { if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
kfree(obj); kfree(obj);
return NULL; return NULL;
} }

View file

@ -54,6 +54,8 @@ struct device;
* for dequeue. * for dequeue.
*/ */
#define DPAA2_IO_ANY_CPU -1
/** /**
* struct dpaa2_io_desc - The DPIO descriptor * struct dpaa2_io_desc - The DPIO descriptor
* @receives_notifications: Use notification mode. Non-zero if the DPIO
@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
* @cb: The callback to be invoked when the notification arrives * @cb: The callback to be invoked when the notification arrives
* @is_cdan: Zero for FQDAN, non-zero for CDAN * @is_cdan: Zero for FQDAN, non-zero for CDAN
* @id: FQID or channel ID, needed for rearm * @id: FQID or channel ID, needed for rearm
- * @desired_cpu: The cpu on which the notifications will show up. -1 means
- *		 any CPU.
+ * @desired_cpu: The cpu on which the notifications will show up. Use
+ *		 DPAA2_IO_ANY_CPU if you don't care.
* @dpio_id: The dpio index * @dpio_id: The dpio index
* @qman64: The 64-bit context value shows up in the FQDAN/CDAN. * @qman64: The 64-bit context value shows up in the FQDAN/CDAN.
* @node: The list node * @node: The list node

View file

@ -1042,8 +1042,10 @@ static int gb_loopback_fn(void *data)
else if (type == GB_LOOPBACK_TYPE_SINK) else if (type == GB_LOOPBACK_TYPE_SINK)
error = gb_loopback_async_sink(gb, size); error = gb_loopback_async_sink(gb, size);
if (error) if (error) {
gb->error++; gb->error++;
gb->iteration_count++;
}
} else { } else {
/* We are effectively single threaded here */ /* We are effectively single threaded here */
if (type == GB_LOOPBACK_TYPE_PING) if (type == GB_LOOPBACK_TYPE_PING)

View file

@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{ {
rtw_free_mlme_priv_ie_data(pmlmepriv); if (pmlmepriv) {
rtw_free_mlme_priv_ie_data(pmlmepriv);
if (pmlmepriv)
vfree(pmlmepriv->free_bss_buf); vfree(pmlmepriv->free_bss_buf);
}
} }
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)

View file

@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
break; break;
} }
/* pin max_level to be >= 0 */
max_level = max_t(s8, 0, max_level);
/* write IGI of lower level */ /* write IGI of lower level */
odm_write_dig(dm, dig_tab->pause_dig_value[max_level]); odm_write_dig(dm, dig_tab->pause_dig_value[max_level]);
ODM_RT_TRACE(dm, ODM_COMP_DIG, ODM_RT_TRACE(dm, ODM_COMP_DIG,

View file

@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw,
dma_addr = rtlpriv->cfg->ops->get_desc( dma_addr = rtlpriv->cfg->ops->get_desc(
hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR); hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR);
pci_unmap_single(rtlpci->pdev, dma_addr, skb->len, pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
kfree_skb(pskb); kfree_skb(pskb);

View file

@ -125,12 +125,14 @@ static void __init init_port(struct earlycon_device *device)
serial8250_early_out(port, UART_FCR, 0); /* no fifo */ serial8250_early_out(port, UART_FCR, 0); /* no fifo */
serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */ serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */
divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud); if (port->uartclk && device->baud) {
c = serial8250_early_in(port, UART_LCR); divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB); c = serial8250_early_in(port, UART_LCR);
serial8250_early_out(port, UART_DLL, divisor & 0xff); serial8250_early_out(port, UART_LCR, c | UART_LCR_DLAB);
serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff); serial8250_early_out(port, UART_DLL, divisor & 0xff);
serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB); serial8250_early_out(port, UART_DLM, (divisor >> 8) & 0xff);
serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
}
} }
int __init early_serial8250_setup(struct earlycon_device *device, int __init early_serial8250_setup(struct earlycon_device *device,
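With the guard added above, the divisor is only programmed when both the clock and the requested baud rate are known, avoiding a divide by zero during very early console setup. The divisor arithmetic itself, with assumed example values (1.8432 MHz clock, 115200 baud) and a simplified stand-in for the kernel's DIV_ROUND_CLOSEST():

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* simplified stand-in */

int main(void)
{
	unsigned int uartclk = 1843200;	/* assumed example clock */
	unsigned int baud = 115200;	/* assumed example baud rate */

	if (uartclk && baud)		/* the guard added above */
		printf("divisor = %u\n", DIV_ROUND_CLOSEST(uartclk, 16 * baud)); /* 1 */
	return 0;
}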

View file

@ -211,7 +211,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
rs485->flags &= SER_RS485_ENABLED; rs485->flags &= ~SER_RS485_ENABLED;
else else
config |= RS485_URA; config |= RS485_URA;

View file

@ -5137,6 +5137,9 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
/* Amazon PCI serial device */
{ PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
/* /*
* These entries match devices with class COMMUNICATION_SERIAL, * These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL

View file

@ -2586,8 +2586,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_dl_write(up, quot); serial_dl_write(up, quot);
/* XR17V35x UARTs have an extra fractional divisor register (DLD) */ /* XR17V35x UARTs have an extra fractional divisor register (DLD) */
if (up->port.type == PORT_XR17V35X) if (up->port.type == PORT_XR17V35X) {
/* Preserve bits not related to baudrate; DLD[7:4]. */
quot_frac |= serial_port_in(port, 0x2) & 0xf0;
serial_port_out(port, 0x2, quot_frac); serial_port_out(port, 0x2, quot_frac);
}
} }
static unsigned int serial8250_get_baud_rate(struct uart_port *port, static unsigned int serial8250_get_baud_rate(struct uart_port *port,
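The DLD fix is a plain read-modify-write: keep the register's upper nibble, replace only the fractional-divisor bits in the lower nibble. The same shape in isolation (the register is replaced by a variable and the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned char dld = 0xA5;	/* assumed current register value */
	unsigned char quot_frac = 0x03;	/* new fractional divisor, low nibble */

	/* preserve bits 7:4, replace bits 3:0 */
	dld = (dld & 0xf0) | (quot_frac & 0x0f);
	printf("0x%02x\n", dld);	/* 0xa3 */
	return 0;
}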

View file

@ -334,7 +334,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2)
{ {
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS); *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS); sport->port.mctrl |= TIOCM_RTS;
mctrl_gpio_set(sport->gpios, sport->port.mctrl);
} }
static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2) static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
@ -342,7 +343,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
*ucr2 &= ~UCR2_CTSC; *ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS; *ucr2 |= UCR2_CTS;
mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS); sport->port.mctrl &= ~TIOCM_RTS;
mctrl_gpio_set(sport->gpios, sport->port.mctrl);
} }
static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2) static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2)

View file

@ -1491,6 +1491,14 @@ static void sci_request_dma(struct uart_port *port)
return; return;
s->cookie_tx = -EINVAL; s->cookie_tx = -EINVAL;
/*
* Don't request a dma channel if no channel was specified
* in the device tree.
*/
if (!of_find_property(port->dev->of_node, "dmas", NULL))
return;
chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV); chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
if (chan) { if (chan) {

View file

@ -246,8 +246,10 @@ static void sysrq_handle_showallcpus(int key)
* architecture has no support for it: * architecture has no support for it:
*/ */
if (!trigger_all_cpu_backtrace()) { if (!trigger_all_cpu_backtrace()) {
struct pt_regs *regs = get_irq_regs(); struct pt_regs *regs = NULL;
if (in_irq())
regs = get_irq_regs();
if (regs) { if (regs) {
pr_info("CPU%d:\n", smp_processor_id()); pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs); show_regs(regs);
@ -266,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
static void sysrq_handle_showregs(int key) static void sysrq_handle_showregs(int key)
{ {
struct pt_regs *regs = get_irq_regs(); struct pt_regs *regs = NULL;
if (in_irq())
regs = get_irq_regs();
if (regs) if (regs)
show_regs(regs); show_regs(regs);
perf_event_print_debug(); perf_event_print_debug();

View file

@ -183,9 +183,9 @@ static int ulpi_of_register(struct ulpi *ulpi)
/* Find a ulpi bus underneath the parent or the grandparent */ /* Find a ulpi bus underneath the parent or the grandparent */
parent = ulpi->dev.parent; parent = ulpi->dev.parent;
if (parent->of_node) if (parent->of_node)
np = of_find_node_by_name(parent->of_node, "ulpi"); np = of_get_child_by_name(parent->of_node, "ulpi");
else if (parent->parent && parent->parent->of_node) else if (parent->parent && parent->parent->of_node)
np = of_find_node_by_name(parent->parent->of_node, "ulpi"); np = of_get_child_by_name(parent->parent->of_node, "ulpi");
if (!np) if (!np)
return 0; return 0;

View file

@ -905,14 +905,25 @@ void usb_release_bos_descriptor(struct usb_device *dev)
} }
} }
static const __u8 bos_desc_len[256] = {
[USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
[USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
[USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
[USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
[CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
[USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
};
/* Get BOS descriptor set */ /* Get BOS descriptor set */
int usb_get_bos_descriptor(struct usb_device *dev) int usb_get_bos_descriptor(struct usb_device *dev)
{ {
struct device *ddev = &dev->dev; struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos; struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap; struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
unsigned char *buffer; unsigned char *buffer;
int length, total_len, num, i; int length, total_len, num, i, ssac;
__u8 cap_type;
int ret; int ret;
bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
@ -965,7 +976,13 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->desc->bNumDeviceCaps = i; dev->bos->desc->bNumDeviceCaps = i;
break; break;
} }
cap_type = cap->bDevCapabilityType;
length = cap->bLength; length = cap->bLength;
if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
dev->bos->desc->bNumDeviceCaps = i;
break;
}
total_len -= length; total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@ -973,7 +990,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
continue; continue;
} }
switch (cap->bDevCapabilityType) { switch (cap_type) {
case USB_CAP_TYPE_WIRELESS_USB: case USB_CAP_TYPE_WIRELESS_USB:
/* Wireless USB cap descriptor is handled by wusb */ /* Wireless USB cap descriptor is handled by wusb */
break; break;
@ -986,8 +1003,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
(struct usb_ss_cap_descriptor *)buffer; (struct usb_ss_cap_descriptor *)buffer;
break; break;
case USB_SSP_CAP_TYPE: case USB_SSP_CAP_TYPE:
dev->bos->ssp_cap = ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
(struct usb_ssp_cap_descriptor *)buffer; ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break; break;
case CONTAINER_ID_TYPE: case CONTAINER_ID_TYPE:
dev->bos->ss_id = dev->bos->ss_id =


@ -1455,14 +1455,18 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
int number_of_packets = 0; int number_of_packets = 0;
unsigned int stream_id = 0; unsigned int stream_id = 0;
void *buf; void *buf;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION | USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR | USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET | USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT)) USBDEVFS_URB_NO_INTERRUPT;
return -EINVAL; /* USBDEVFS_URB_ISO_ASAP is a special case */
if (uurb->type == USBDEVFS_URB_TYPE_ISO)
mask |= USBDEVFS_URB_ISO_ASAP;
if (uurb->flags & ~mask)
return -EINVAL;
if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
return -EINVAL; return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer) if (uurb->buffer_length > 0 && !uurb->buffer)
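
The visible effect is on the usbfs ABI: USBDEVFS_URB_ISO_ASAP is now rejected with -EINVAL unless the URB is isochronous, instead of being silently tolerated on every transfer type. A hedged userspace sketch (the endpoint address and the already-opened usbfs file descriptor are assumptions):

	#include <linux/usbdevice_fs.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Submit a bulk OUT URB through usbfs. flags stays 0 here; setting
	 * USBDEVFS_URB_ISO_ASAP on a non-isochronous URB now makes the
	 * ioctl fail with EINVAL.
	 */
	static int submit_bulk(int fd, unsigned char ep, void *buf, int len)
	{
		struct usbdevfs_urb urb;

		memset(&urb, 0, sizeof(urb));
		urb.type = USBDEVFS_URB_TYPE_BULK;
		urb.endpoint = ep;
		urb.buffer = buf;
		urb.buffer_length = len;

		return ioctl(fd, USBDEVFS_SUBMITURB, &urb);
	}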


@ -4936,6 +4936,15 @@ loop:
usb_put_dev(udev); usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP)) if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break; break;
/* When halfway through our retry count, power-cycle the port */
if (i == (SET_CONFIG_TRIES / 2) - 1) {
dev_info(&port_dev->dev, "attempt power cycle\n");
usb_hub_set_port_power(hdev, hub, port1, false);
msleep(2 * hub_power_on_good_delay(hub));
usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
}
} }
if (hub->hdev->parent || if (hub->hdev->parent ||
!hcd->driver->port_handed_over || !hcd->driver->port_handed_over ||


@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* appletouch */ /* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },


@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
default: /* unknown */ default: /* unknown */
break; break;
} }
temp = (cap >> 8) & 0xff; offset = (cap >> 8) & 0xff;
} }
} }
#endif #endif


@ -947,6 +947,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
if (!vdev) if (!vdev)
return; return;
if (vdev->real_port == 0 ||
vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad vdev->real_port.\n");
goto out;
}
tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */ /* is this a hub device that added a tt_info to the tts list */
@ -960,6 +966,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
} }
} }
} }
out:
/* we are now at a leaf device */ /* we are now at a leaf device */
xhci_free_virt_device(xhci, slot_id); xhci_free_virt_device(xhci, slot_id);
} }


@ -2486,12 +2486,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/ */
if (list_empty(&ep_ring->td_list)) { if (list_empty(&ep_ring->td_list)) {
/* /*
 * A stopped endpoint may generate an extra completion * Don't print warnings if it's due to a stopped endpoint
 * event if the device was suspended. Don't print * generating an extra completion event if the device
 * warnings. * was suspended. Or, an event for the last TRB of a
* short TD we already got a short event for.
* The short TD is already removed from the TD list.
*/ */
if (!(trb_comp_code == COMP_STOPPED || if (!(trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) { trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
ep_ring->last_td_was_short)) {
xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index); ep_index);


@ -3583,10 +3583,9 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
state = readl(&xhci->op_regs->status); state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) { (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, slot_id);
spin_unlock_irqrestore(&xhci->lock, flags); spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command); kfree(command);
return ret; return -ENODEV;
} }
ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,


@ -774,9 +774,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return -ENOMEM; return -ENOMEM;
mtu->irq = platform_get_irq(pdev, 0); mtu->irq = platform_get_irq(pdev, 0);
if (mtu->irq <= 0) { if (mtu->irq < 0) {
dev_err(dev, "fail to get irq number\n"); dev_err(dev, "fail to get irq number\n");
return -ENODEV; return mtu->irq;
} }
dev_info(dev, "irq %d\n", mtu->irq); dev_info(dev, "irq %d\n", mtu->irq);


@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) { if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
return -ENOMEM; ret = PTR_ERR(tu->extcon);
goto err_disable_clk;
} }
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);


@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */ /* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125 #define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
#define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001 #define CMOTECH_PRODUCT_6001 0x6001
@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist }, .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist }, .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),


@ -34,12 +34,14 @@ static const struct usb_device_id id_table[] = {
}; };
static const struct usb_device_id dbc_id_table[] = { static const struct usb_device_id dbc_id_table[] = {
{ USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) }, { USB_DEVICE(0x1d6b, 0x0011) },
{ }, { },
}; };
static const struct usb_device_id id_table_combined[] = { static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(0x0525, 0x127a) }, { USB_DEVICE(0x0525, 0x127a) },
{ USB_DEVICE(0x1d6b, 0x0010) },
{ USB_DEVICE(0x1d6b, 0x0011) }, { USB_DEVICE(0x1d6b, 0x0011) },
{ }, { },
}; };


@ -112,6 +112,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
} }
} }
/* All Seagate disk enclosures have broken ATA pass-through support */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
flags |= US_FL_NO_ATA_1X;
usb_stor_adjust_quirks(udev, &flags); usb_stor_adjust_quirks(udev, &flags);
if (flags & US_FL_IGNORE_UAS) { if (flags & US_FL_IGNORE_UAS) {


@ -1112,7 +1112,6 @@ static int hcd_name_to_id(const char *name)
static int vhci_setup(struct usb_hcd *hcd) static int vhci_setup(struct usb_hcd *hcd)
{ {
struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller)); struct vhci *vhci = *((void **)dev_get_platdata(hcd->self.controller));
hcd->self.sg_tablesize = ~0;
if (usb_hcd_is_primary_hcd(hcd)) { if (usb_hcd_is_primary_hcd(hcd)) {
vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd); vhci->vhci_hcd_hs = hcd_to_vhci_hcd(hcd);
vhci->vhci_hcd_hs->vhci = vhci; vhci->vhci_hcd_hs->vhci = vhci;


@ -459,6 +459,7 @@
#define TEXT_TEXT \ #define TEXT_TEXT \
ALIGN_FUNCTION(); \ ALIGN_FUNCTION(); \
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.text..refcount) \
*(.ref.text) \ *(.ref.text) \
MEM_KEEP(init.text) \ MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \ MEM_KEEP(exit.text) \
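
This pairs with CONFIG_ARCH_HAS_REFCOUNT: the x86 fast refcount implementation emits its out-of-line saturation/exception stubs into .text..refcount, so the linker script has to gather that section with the rest of the text. The refcount_t API itself is unchanged; a minimal usage sketch for context (the struct and helpers are illustrative, not from this patch):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t ref;		/* saturating reference count */
	};

	static void foo_get(struct foo *f)
	{
		refcount_inc(&f->ref);	/* saturates instead of overflowing */
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->ref))
			kfree(f);
	}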


@ -876,6 +876,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
__u8 bReserved; __u8 bReserved;
} __attribute__((packed)); } __attribute__((packed));
#define USB_DT_USB_WIRELESS_CAP_SIZE 11
/* USB 2.0 Extension descriptor */ /* USB 2.0 Extension descriptor */
#define USB_CAP_TYPE_EXT 2 #define USB_CAP_TYPE_EXT 2
@ -1068,6 +1070,7 @@ struct usb_ptm_cap_descriptor {
__u8 bDevCapabilityType; __u8 bDevCapabilityType;
} __attribute__((packed)); } __attribute__((packed));
#define USB_DT_USB_PTM_ID_SIZE 3
/* /*
* The size of the descriptor for the Sublink Speed Attribute Count * The size of the descriptor for the Sublink Speed Attribute Count
* (SSAC) specified in bmAttributes[4:0]. * (SSAC) specified in bmAttributes[4:0].


@ -4433,6 +4433,8 @@ static int __perf_read_group_add(struct perf_event *leader,
if (ret) if (ret)
return ret; return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
/* /*
* Since we co-schedule groups, {enabled,running} times of siblings * Since we co-schedule groups, {enabled,running} times of siblings
* will be identical to those of the leader, so we only publish one * will be identical to those of the leader, so we only publish one
@ -4455,8 +4457,6 @@ static int __perf_read_group_add(struct perf_event *leader,
if (read_format & PERF_FORMAT_ID) if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader); values[n++] = primary_event_id(leader);
raw_spin_lock_irqsave(&ctx->lock, flags);
list_for_each_entry(sub, &leader->sibling_list, group_entry) { list_for_each_entry(sub, &leader->sibling_list, group_entry) {
values[n++] += perf_event_count(sub); values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID) if (read_format & PERF_FORMAT_ID)


@ -573,13 +573,15 @@ static void kprobe_optimizer(struct work_struct *work)
do_unoptimize_kprobes(); do_unoptimize_kprobes();
/* /*
 * Step 2: Wait for quiescence period to ensure all running interrupts * Step 2: Wait for quiescence period to ensure that all potentially
 * are done. Because optprobe may modify multiple instructions * preempted tasks have been scheduled normally. Because optprobe
* there is a chance that Nth instruction is interrupted. In that * may modify multiple instructions, there is a chance that Nth
* case, running interrupt can return to 2nd-Nth byte of jump * instruction is preempted. In that case, such tasks can return
* instruction. This wait is for avoiding it. * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
 * Note that on a non-preemptive kernel, this is transparently converted
 * to synchronize_sched() to wait for all interrupts to have completed.
*/ */
synchronize_sched(); synchronize_rcu_tasks();
/* Step 3: Optimize kprobes after quiescence period */ /* Step 3: Optimize kprobes after quiescence period */
do_optimize_kprobes(); do_optimize_kprobes();


@ -1707,6 +1707,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1; return 1;
} }
static void gup_pgd_range(unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pgd_t *pgdp;
pgdp = pgd_offset(current->mm, addr);
do {
pgd_t pgd = READ_ONCE(*pgdp);
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
return;
if (unlikely(pgd_huge(pgd))) {
if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
pages, nr))
return;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, write, pages, nr))
return;
} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
return;
} while (pgdp++, addr = next, addr != end);
}
#ifndef gup_fast_permitted
/*
* Check if it's allowed to use __get_user_pages_fast() for the range, or
* we need to fall back to the slow version:
*/
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
unsigned long len, end;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
return end >= start;
}
#endif
/* /*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP. It will only return non-negative values. * the regular GUP. It will only return non-negative values.
@ -1714,10 +1755,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
int __get_user_pages_fast(unsigned long start, int nr_pages, int write, int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages) struct page **pages)
{ {
struct mm_struct *mm = current->mm;
unsigned long addr, len, end; unsigned long addr, len, end;
unsigned long next, flags; unsigned long flags;
pgd_t *pgdp;
int nr = 0; int nr = 0;
start &= PAGE_MASK; start &= PAGE_MASK;
@ -1741,45 +1780,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting. * block IPIs that come from THPs splitting.
*/ */
local_irq_save(flags); if (gup_fast_permitted(start, nr_pages, write)) {
pgdp = pgd_offset(mm, addr); local_irq_save(flags);
do { gup_pgd_range(addr, end, write, pages, &nr);
pgd_t pgd = READ_ONCE(*pgdp); local_irq_restore(flags);
}
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
break;
if (unlikely(pgd_huge(pgd))) {
if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
pages, &nr))
break;
} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
PGDIR_SHIFT, next, write, pages, &nr))
break;
} else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
return nr; return nr;
} }
#ifndef gup_fast_permitted
/*
* Check if it's allowed to use __get_user_pages_fast() for the range, or
* we need to fall back to the slow version:
*/
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
unsigned long len, end;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
return end >= start;
}
#endif
/** /**
* get_user_pages_fast() - pin user pages in memory * get_user_pages_fast() - pin user pages in memory
* @start: starting user address * @start: starting user address
@ -1799,12 +1808,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
int get_user_pages_fast(unsigned long start, int nr_pages, int write, int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages) struct page **pages)
{ {
unsigned long addr, len, end;
int nr = 0, ret = 0; int nr = 0, ret = 0;
start &= PAGE_MASK; start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
return 0;
if (gup_fast_permitted(start, nr_pages, write)) { if (gup_fast_permitted(start, nr_pages, write)) {
nr = __get_user_pages_fast(start, nr_pages, write, pages); local_irq_disable();
gup_pgd_range(addr, end, write, pages, &nr);
local_irq_enable();
ret = nr; ret = nr;
} }
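
The point of the restructuring is that gup_pgd_range() is now shared by both fast paths and both are gated on gup_fast_permitted(), so an architecture can veto the lockless walk for ranges it cannot handle while the generic default above only rejects overflowing ranges. A sketch of what an arch-side override might look like (the TASK_SIZE_MAX bound is an assumption for illustration, not part of this patch):

	/* Hypothetical arch override: besides the generic overflow check,
	 * refuse ranges that reach past the user address space so the
	 * lockless walk never touches kernel page tables.
	 */
	#define gup_fast_permitted gup_fast_permitted
	static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
					      int write)
	{
		unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT;
		unsigned long end = start + len;

		return end >= start && end <= TASK_SIZE_MAX;
	}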


@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
ima_hash_algo = HASH_ALGO_SHA1; ima_hash_algo = HASH_ALGO_SHA1;
else if (strncmp(str, "md5", 3) == 0) else if (strncmp(str, "md5", 3) == 0)
ima_hash_algo = HASH_ALGO_MD5; ima_hash_algo = HASH_ALGO_MD5;
else
return 1;
goto out; goto out;
} }
@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
break; break;
} }
} }
if (i == HASH_ALGO__LAST)
return 1;
out: out:
hash_setup_done = 1; hash_setup_done = 1;
return 1; return 1;
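
hash_setup() parses the ima_hash= kernel parameter; with this change an unrecognized algorithm name returns early without setting hash_setup_done, so the compile-time default can still be applied later instead of being locked out by a typo. For context, the handler is registered with __setup() along these lines (registration as in ima_main.c):

	/* Booting with e.g. "ima_hash=sha256" selects the algorithm; an
	 * unknown name such as "ima_hash=foo" is now ignored rather than
	 * marking hash setup as done.
	 */
	__setup("ima_hash=", hash_setup);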


@ -15,6 +15,10 @@
# define POISON_POINTER_DELTA 0 # define POISON_POINTER_DELTA 0
#endif #endif
#ifdef __cplusplus
#define LIST_POISON1 NULL
#define LIST_POISON2 NULL
#else
/* /*
* These are non-NULL pointers that will result in page faults * These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses * under normal circumstances, used to verify that nobody uses
@ -22,6 +26,7 @@
*/ */
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
#endif
/********** include/linux/timer.h **********/ /********** include/linux/timer.h **********/
/* /*


@ -2733,6 +2733,7 @@ static int perf_c2c__record(int argc, const char **argv)
if (!perf_mem_events[j].supported) { if (!perf_mem_events[j].supported) {
pr_err("failed: event '%s' not supported\n", pr_err("failed: event '%s' not supported\n",
perf_mem_events[j].name); perf_mem_events[j].name);
free(rec_argv);
return -1; return -1;
} }


@ -113,6 +113,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (!perf_mem_events[j].supported) { if (!perf_mem_events[j].supported) {
pr_err("failed: event '%s' not supported\n", pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(j)); perf_mem_events__name(j));
free(rec_argv);
return -1; return -1;
} }


@ -1732,8 +1732,10 @@ static int timechart__io_record(int argc, const char **argv)
if (rec_argv == NULL) if (rec_argv == NULL)
return -ENOMEM; return -ENOMEM;
if (asprintf(&filter, "common_pid != %d", getpid()) < 0) if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
free(rec_argv);
return -ENOMEM; return -ENOMEM;
}
p = rec_argv; p = rec_argv;
for (i = 0; i < common_args_nr; i++) for (i = 0; i < common_args_nr; i++)


@ -2086,6 +2086,7 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
else { else {
pr_err("Neither raw_syscalls nor syscalls events exist.\n"); pr_err("Neither raw_syscalls nor syscalls events exist.\n");
free(rec_argv);
return -1; return -1;
} }
} }
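
The four perf fixes above close the same leak: rec_argv is allocated and then dropped on an early error return. A hedged sketch of the equivalent structure with a single exit path; the helper names are made up for illustration and do not exist in perf:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	int events_supported(void);			/* hypothetical check */
	int run_record(int argc, const char **argv);	/* hypothetical runner */

	static int record_cmd(int argc, const char **argv)
	{
		const char **rec_argv;
		int ret = -1;

		rec_argv = calloc(argc + 4, sizeof(char *));
		if (!rec_argv)
			return -ENOMEM;

		if (!events_supported()) {
			fprintf(stderr, "failed: event not supported\n");
			goto out_free;		/* was: return -1 and leak */
		}

		ret = run_record(argc, rec_argv);
	out_free:
		free(rec_argv);
		return ret;
	}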


@ -167,7 +167,7 @@ static int run_dir(const char *d, const char *perf)
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v); d, d, perf, vcnt, v);
return system(cmd); return system(cmd) ? TEST_FAIL : TEST_OK;
} }
int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused) int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)


@ -238,6 +238,7 @@ class Test(object):
# events in result. Fail if there aren't any. # events in result. Fail if there aren't any.
for exp_name, exp_event in expect.items(): for exp_name, exp_event in expect.items():
exp_list = [] exp_list = []
res_event = {}
log.debug(" matching [%s]" % exp_name) log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items(): for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name) log.debug(" to [%s]" % res_name)
@ -254,7 +255,10 @@ class Test(object):
if exp_event.optional(): if exp_event.optional():
log.debug(" %s does not match, but is optional" % exp_name) log.debug(" %s does not match, but is optional" % exp_name)
else: else:
exp_event.diff(res_event) if not res_event:
log.debug(" res_event is empty");
else:
exp_event.diff(res_event)
raise Fail(self, 'match failure'); raise Fail(self, 'match failure');
match[exp_name] = exp_list match[exp_name] = exp_list


@ -115,7 +115,15 @@ static void check_valid_segment(uint16_t index, int ldt,
return; return;
} }
if (ar != expected_ar) { /* The SDM says "bits 19:16 are undefined". Thanks. */
ar &= ~0xF0000;
/*
* NB: Different Linux versions do different things with the
* accessed bit in set_thread_area().
*/
if (ar != expected_ar &&
(ldt || ar != (expected_ar | AR_ACCESSED))) {
printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, ar, expected_ar); (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
nerrs++; nerrs++;
@ -367,9 +375,24 @@ static void do_simple_tests(void)
install_invalid(&desc, false); install_invalid(&desc, false);
desc.seg_not_present = 0; desc.seg_not_present = 0;
desc.read_exec_only = 0;
desc.seg_32bit = 1; desc.seg_32bit = 1;
desc.read_exec_only = 0;
desc.limit = 0xfffff;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB); install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
desc.limit_in_pages = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
desc.contents = 1;
desc.read_exec_only = 0;
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.read_exec_only = 1;
install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
desc.limit = 0;
install_invalid(&desc, true); install_invalid(&desc, true);
} }


@ -2,6 +2,7 @@
SUBDIRS := libsrc src SUBDIRS := libsrc src
includedir = @includedir@/usbip includedir = @includedir@/usbip
include_HEADERS := $(addprefix libsrc/, \ include_HEADERS := $(addprefix libsrc/, \
usbip_common.h vhci_driver.h usbip_host_driver.h) usbip_common.h vhci_driver.h usbip_host_driver.h \
list.h sysfs_utils.h usbip_host_common.h)
dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8) dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)


@ -329,9 +329,17 @@ err:
int usbip_vhci_get_free_port(uint32_t speed) int usbip_vhci_get_free_port(uint32_t speed)
{ {
for (int i = 0; i < vhci_driver->nports; i++) { for (int i = 0; i < vhci_driver->nports; i++) {
if (speed == USB_SPEED_SUPER &&
vhci_driver->idev[i].hub != HUB_SPEED_SUPER) switch (speed) {
continue; case USB_SPEED_SUPER:
if (vhci_driver->idev[i].hub != HUB_SPEED_SUPER)
continue;
break;
default:
if (vhci_driver->idev[i].hub != HUB_SPEED_HIGH)
continue;
break;
}
if (vhci_driver->idev[i].status == VDEV_ST_NULL) if (vhci_driver->idev[i].status == VDEV_ST_NULL)
return vhci_driver->idev[i].port; return vhci_driver->idev[i].port;