updating to mainline 4.12.9

Jake Day 2017-08-25 09:43:42 -04:00
parent a58738788e
commit fc3972b401
62 changed files with 347 additions and 219 deletions


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 12
-SUBLEVEL = 8
+SUBLEVEL = 9
 EXTRAVERSION =
 NAME = Fearless Coyote


@@ -507,7 +507,7 @@
 	pinctrl_pcie: pciegrp {
 		fsl,pins = <
 			/* PCIe reset */
-			MX6QDL_PAD_EIM_BCLK__GPIO6_IO31	0x030b0
+			MX6QDL_PAD_EIM_DA0__GPIO3_IO00	0x030b0
 			MX6QDL_PAD_EIM_DA4__GPIO3_IO04	0x030b0
 		>;
 	};
@@ -668,7 +668,7 @@
 &pcie {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pcie>;
-	reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+	reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };


@@ -37,7 +37,7 @@ do {						\
 	".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
 	"2:\t.asciz " #__file "\n" \
 	".popsection\n" \
-	".pushsection __bug_table,\"a\"\n" \
+	".pushsection __bug_table,\"aw\"\n" \
 	".align 2\n" \
 	"3:\t.word 1b, 2b\n" \
 	"\t.hword " #__line ", 0\n" \


@@ -36,7 +36,7 @@
 #ifdef CONFIG_GENERIC_BUG
 #define __BUG_ENTRY(flags) \
-	".pushsection __bug_table,\"a\"\n\t" \
+	".pushsection __bug_table,\"aw\"\n\t" \
 	".align 2\n\t" \
 	"0:	.long 1f - 0b\n\t" \
 _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \


@@ -114,10 +114,10 @@
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE		0x100000000UL
+#define ELF_ET_DYN_BASE		(2 * TASK_SIZE_64 / 3)

 #ifndef __ASSEMBLY__


@@ -21,7 +21,7 @@
 #define _BUG_OR_WARN(flags) \
 	asm volatile( \
 		"1:	.hword %0\n" \
-		"	.section __bug_table,\"a\",@progbits\n" \
+		"	.section __bug_table,\"aw\",@progbits\n" \
 		"2:	.long 1b\n" \
 		"	.long %1\n" \
 		"	.short %2\n" \
@@ -38,7 +38,7 @@
 #define _BUG_OR_WARN(flags) \
 	asm volatile( \
 		"1:	.hword %0\n" \
-		"	.section __bug_table,\"a\",@progbits\n" \
+		"	.section __bug_table,\"aw\",@progbits\n" \
 		"2:	.long 1b\n" \
 		"	.short %1\n" \
 		"	.org 2b + %2\n" \


@@ -21,7 +21,7 @@ do { \
 	asm volatile( \
 		"	syscall 15		\n" \
 		"0:				\n" \
-		"	.section __bug_table,\"a\"	\n" \
+		"	.section __bug_table,\"aw\"	\n" \
 		"	.long 0b,%0,%1		\n" \
 		"	.previous		\n" \
 		: \


@@ -27,7 +27,7 @@
 	do { \
 		asm volatile("\n" \
 			"1:\t" PARISC_BUG_BREAK_ASM "\n" \
-			"\t.pushsection __bug_table,\"a\"\n" \
+			"\t.pushsection __bug_table,\"aw\"\n" \
 			"2:\t" ASM_WORD_INSN "1b, %c0\n" \
 			"\t.short %c1, %c2\n" \
 			"\t.org 2b+%c3\n" \
@@ -50,7 +50,7 @@
 	do { \
 		asm volatile("\n" \
 			"1:\t" PARISC_BUG_BREAK_ASM "\n" \
-			"\t.pushsection __bug_table,\"a\"\n" \
+			"\t.pushsection __bug_table,\"aw\"\n" \
 			"2:\t" ASM_WORD_INSN "1b, %c0\n" \
 			"\t.short %c1, %c2\n" \
 			"\t.org 2b+%c3\n" \
@@ -64,7 +64,7 @@
 	do { \
 		asm volatile("\n" \
 			"1:\t" PARISC_BUG_BREAK_ASM "\n" \
-			"\t.pushsection __bug_table,\"a\"\n" \
+			"\t.pushsection __bug_table,\"aw\"\n" \
 			"2:\t" ASM_WORD_INSN "1b\n" \
 			"\t.short %c0\n" \
 			"\t.org 2b+%c1\n" \


@@ -18,7 +18,7 @@
 #include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 .macro EMIT_BUG_ENTRY addr,file,line,flags
-	.section __bug_table,"a"
+	.section __bug_table,"aw"
 5001:	PPC_LONG \addr, 5002f
	.short \line, \flags
	.org 5001b+BUG_ENTRY_SIZE
@@ -29,7 +29,7 @@
 .endm
 #else
 .macro EMIT_BUG_ENTRY addr,file,line,flags
-	.section __bug_table,"a"
+	.section __bug_table,"aw"
 5001:	PPC_LONG \addr
	.short \flags
	.org 5001b+BUG_ENTRY_SIZE
@@ -42,14 +42,14 @@
    sizeof(struct bug_entry), respectively */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _EMIT_BUG_ENTRY \
-	".section __bug_table,\"a\"\n" \
+	".section __bug_table,\"aw\"\n" \
	"2:\t" PPC_LONG "1b, %0\n" \
	"\t.short %1, %2\n" \
	".org 2b+%3\n" \
	".previous\n"
 #else
 #define _EMIT_BUG_ENTRY \
-	".section __bug_table,\"a\"\n" \
+	".section __bug_table,\"aw\"\n" \
	"2:\t" PPC_LONG "1b\n" \
	"\t.short %2\n" \
	".org 2b+%3\n" \


@@ -362,7 +362,8 @@ void enable_kernel_vsx(void)
 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+	if (current->thread.regs &&
+	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
 		check_if_tm_restore_required(current);
 		/*
 		 * If a thread has already been reclaimed then the
@@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_VSX) {
+		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
 			BUG_ON(tsk != current);
 			giveup_vsx(tsk);
 		}


@@ -14,7 +14,7 @@
 		".section .rodata.str,\"aMS\",@progbits,1\n" \
 		"2:	.asciz	\""__FILE__"\"\n" \
 		".previous\n" \
-		".section __bug_table,\"a\"\n" \
+		".section __bug_table,\"aw\"\n" \
 		"3:	.long	1b-3b,2b-3b\n" \
 		"	.short	%0,%1\n" \
 		"	.org	3b+%2\n" \
@@ -30,7 +30,7 @@
 	asm volatile( \
 		"0:	j	0b+2\n" \
 		"1:\n" \
-		".section __bug_table,\"a\"\n" \
+		".section __bug_table,\"aw\"\n" \
 		"2:	.long	1b-2b\n" \
 		"	.short	%0\n" \
 		"	.org	2b+%1\n" \


@@ -24,14 +24,14 @@
  */
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 #define _EMIT_BUG_ENTRY \
-	"\t.pushsection __bug_table,\"a\"\n" \
+	"\t.pushsection __bug_table,\"aw\"\n" \
 	"2:\t.long 1b, %O1\n" \
 	"\t.short %O2, %O3\n" \
 	"\t.org 2b+%O4\n" \
 	"\t.popsection\n"
 #else
 #define _EMIT_BUG_ENTRY \
-	"\t.pushsection __bug_table,\"a\"\n" \
+	"\t.pushsection __bug_table,\"aw\"\n" \
 	"2:\t.long 1b\n" \
 	"\t.short %O3\n" \
 	"\t.org 2b+%O4\n" \


@@ -94,6 +94,7 @@ config X86
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
+	select HARDLOCKUP_CHECK_TIMESTAMP	if X86_64
 	select HAVE_ACPI_APEI			if ACPI
 	select HAVE_ACPI_APEI_NMI		if ACPI
 	select HAVE_ALIGNED_STRUCT_PAGE		if SLUB


@@ -117,11 +117,10 @@
 .set T1, REG_T1
 .endm

-#define K_BASE		%r8
 #define HASH_PTR	%r9
+#define BLOCKS_CTR	%r8
 #define BUFFER_PTR	%r10
 #define BUFFER_PTR2	%r13
-#define BUFFER_END	%r11

 #define PRECALC_BUF	%r14
 #define WK_BUF		%r15
@@ -205,14 +204,14 @@
 * blended AVX2 and ALU instruction scheduling
 * 1 vector iteration per 8 rounds
 */
-		vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+		vmovdqu (i * 2)(BUFFER_PTR), W_TMP
 	.elseif ((i & 7) == 1)
-		vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+		vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
			 WY_TMP, WY_TMP
 	.elseif ((i & 7) == 2)
		vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
 	.elseif ((i & 7) == 4)
-		vpaddd  K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	.elseif ((i & 7) == 7)
		vmovdqu  WY_TMP, PRECALC_WK(i&~7)
@@ -255,7 +254,7 @@
		vpxor	WY, WY_TMP, WY_TMP
 	.elseif ((i & 7) == 7)
		vpxor	WY_TMP2, WY_TMP, WY
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
		vmovdqu	WY_TMP, PRECALC_WK(i&~7)

		PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
		vpsrld	$30, WY, WY
		vpor	WY, WY_TMP, WY
 	.elseif ((i & 7) == 7)
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd	K_XMM + K_XMM_AR(%rip), WY, WY_TMP
		vmovdqu	WY_TMP, PRECALC_WK(i&~7)

		PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
 .endm

+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+	mov	\a, RTA
+	add	$\d, RTA
+	cmp	$\c, \b
+	cmovge	RTA, \a
+.endm

 /*
 * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
 */
@@ -463,13 +472,16 @@
	lea	(2*4*80+32)(%rsp), WK_BUF

	# Precalc WK for first 2 blocks
-	PRECALC_OFFSET = 0
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
	.set i, 0
	.rept    160
		PRECALC i
		.set i, i + 1
	.endr
-	PRECALC_OFFSET = 128
+
+	/* Go to next block if needed */
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
	xchg	WK_BUF, PRECALC_BUF

	.align 32
@@ -479,8 +491,8 @@ _loop:
	 * we use K_BASE value as a signal of a last block,
	 * it is set below by: cmovae BUFFER_PTR, K_BASE
	 */
-	cmp	K_BASE, BUFFER_PTR
-	jne	_begin
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jnz	_begin
	.align 32
	jmp	_end
	.align 32
@@ -512,10 +524,10 @@ _loop0:
		.set j, j+2
	.endr

-	add	$(2*64), BUFFER_PTR	/* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR	/* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
+	/* Update Counter */
+	sub	$1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128

	/*
	 * rounds
	 * 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
	UPDATE_HASH	12(HASH_PTR), D
	UPDATE_HASH	16(HASH_PTR), E

-	cmp	K_BASE, BUFFER_PTR	/* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop

	mov	TB, B
@@ -575,10 +587,10 @@ _loop2:
		.set j, j+2
	.endr

-	add	$(2*64), BUFFER_PTR2	/* move to next even-64-byte block */
-
-	cmp	BUFFER_END, BUFFER_PTR2	/* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
+	/* update counter */
+	sub	$1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128

	jmp	_loop3
 _loop3:
@@ -641,19 +653,12 @@ _loop3:
	avx2_zeroupper

-	lea	K_XMM_AR(%rip), K_BASE
-
+	/* Setup initial values */
	mov	CTX, HASH_PTR
	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
-
-	shl	$6, CNT	/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
-
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR

	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
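A note on the ADD_IF_GE macro introduced above: it advances a buffer pointer by a fixed stride only while enough blocks remain, replacing the old BUFFER_END/K_BASE sentinel scheme with a plain block counter. A minimal C sketch of the same semantics (illustrative names only, not part of the patch):

    /* Sketch of ADD_IF_GE a, b, c, d: advance pointer `a` by `d` bytes
     * only if the remaining block count `b` is at least `c`; otherwise
     * leave it alone (mirrors the cmp/cmovge pair in the asm macro). */
    static inline const unsigned char *
    add_if_ge(const unsigned char *a, long blocks_ctr, long c, long d)
    {
            return (blocks_ctr >= c) ? a + d : a;
    }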


@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 static bool avx2_usable(void)
 {
-	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 	    && boot_cpu_has(X86_FEATURE_BMI1)
 	    && boot_cpu_has(X86_FEATURE_BMI2))
 		return true;


@@ -1209,6 +1209,8 @@ ENTRY(nmi)
	 * other IST entries.
	 */

+	ASM_CLAC
+
	/* Use %rdx as our temp variable throughout */
	pushq	%rdx


@@ -2105,7 +2105,7 @@ static void refresh_pce(void *ignored)
	load_mm_cr4(current->active_mm);
 }

-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;
@@ -2120,22 +2120,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
	 * For now, this can't happen because all callers hold mmap_sem
	 * for write. If this changes, we'll need a different solution.
	 */
-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+	lockdep_assert_held_exclusive(&mm->mmap_sem);

-	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }

-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 {
-	if (!current->mm)
-		return;
-
	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
		return;

-	if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
 }

 static int x86_pmu_event_idx(struct perf_event *event)


@@ -35,7 +35,7 @@
 #define _BUG_FLAGS(ins, flags) \
 do { \
	asm volatile("1:\t" ins "\n" \
-		     ".pushsection __bug_table,\"a\"\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
		     "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
		     "\t.word %c1" "\t# bug_entry::line\n" \
@@ -52,7 +52,7 @@ do { \
 #define _BUG_FLAGS(ins, flags) \
 do { \
	asm volatile("1:\t" ins "\n" \
-		     ".pushsection __bug_table,\"a\"\n" \
+		     ".pushsection __bug_table,\"aw\"\n" \
		     "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
		     "\t.word %c0" "\t# bug_entry::flags\n" \
		     "\t.org 2b+%c1\n" \


@@ -247,11 +247,11 @@ extern int force_personality32;

 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
 #define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
-						  0x100000000UL)
+						  (TASK_SIZE / 3 * 2))

 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. This could be done in user space,
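As a quick sanity check of the new formula (a standalone sketch; the TASK_SIZE value below assumes the usual 47-bit x86-64 user address space and is not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed 47-bit user VA limit on x86-64 (illustrative) */
            unsigned long task_size = 0x00007ffffffff000UL;
            /* New 64-bit PIE base: two thirds of TASK_SIZE */
            printf("%#lx\n", task_size / 3 * 2);  /* 0x555555554aaa */
            return 0;
    }

The result sits well above 0x100000000 (4GB), so the whole 32-bit range stays free for 32-bit pointers, as the comment says.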


@@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void)
 static unsigned long stack_maxrandom_size(unsigned long task_size)
 {
	unsigned long max = 0;
-	if ((current->flags & PF_RANDOMIZE) &&
-		!(current->personality & ADDR_NO_RANDOMIZE)) {
+	if (current->flags & PF_RANDOMIZE) {
		max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
		max <<= PAGE_SHIFT;
	}
@@ -82,13 +81,13 @@ static int mmap_is_legacy(void)

 static unsigned long arch_rnd(unsigned int rndbits)
 {
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
	return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
 }

 unsigned long arch_mmap_rnd(void)
 {
-	if (!(current->flags & PF_RANDOMIZE))
-		return 0;
-
	return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
 }


@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
	for (queue = 0; queue < set->nr_hw_queues; queue++) {
		mask = pci_irq_get_affinity(pdev, queue);
		if (!mask)
-			return -EINVAL;
+			goto fallback;

		for_each_cpu(cpu, mask)
			set->mq_map[cpu] = queue;
	}

	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
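The fallback above degrades to a single-queue mapping instead of failing the whole map when a hardware queue has no IRQ affinity mask. An illustrative model of the resulting table (hypothetical helper, not the kernel API):

    /* Sketch: route every possible CPU to hw queue 0, which is what
     * mq_map[cpu] = 0 for all cpus means in the fallback path above. */
    static void map_all_to_queue0(unsigned int *mq_map, unsigned int nr_cpus)
    {
            for (unsigned int cpu = 0; cpu < nr_cpus; cpu++)
                    mq_map[cpu] = 0;    /* single shared hw queue */
    }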


@@ -2119,9 +2119,9 @@ static int blkfront_resume(struct xenbus_device *dev)
			/*
			 * Get the bios in the request so we can re-queue them.
			 */
-			if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-			    req_op(shadow[i].request) == REQ_OP_DISCARD ||
-			    req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
+			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
			    shadow[j].request->cmd_flags & REQ_FUA) {
				/*
				 * Flush operations don't contain bios, so


@@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
	req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
			&crypt->icv_rev_aes);
	if (unlikely(!req_ctx->hmac_virt))
-		goto free_buf_src;
+		goto free_buf_dst;

	if (!encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, cryptlen, authsize, 0);
@@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

-free_buf_src:
-	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
 }


@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
		struct dma_fence *f = e->fence;
		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

+		if (dma_fence_is_signaled(f)) {
+			hash_del(&e->node);
+			dma_fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
+
		if (ring && s_fence) {
			/* For fences from the same ring it is sufficient
			 * when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
			}
		}

-		if (dma_fence_is_signaled(f)) {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
		return f;
	}


@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
		goto err_unpin;
	}

+	ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+	if (ret)
+		goto err_unpin;
+
	ret = req->engine->emit_bb_start(req,
					 so->batch_offset, so->batch_size,
					 I915_DISPATCH_SECURE);


@@ -1224,6 +1224,10 @@ static const struct acpi_device_id elan_acpi_id[] = {
	{ "ELAN0100", 0 },
	{ "ELAN0600", 0 },
	{ "ELAN0605", 0 },
+	{ "ELAN0608", 0 },
+	{ "ELAN0605", 0 },
+	{ "ELAN0609", 0 },
+	{ "ELAN060B", 0 },
	{ "ELAN1000", 0 },
	{ }
 };


@@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
	struct device_node *np;
	void __iomem *regs;

-	np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
	if (!np)
-		np = of_find_compatible_node(root, NULL,
+		np = of_find_compatible_node(NULL, NULL,
					     "atmel,at91sam9x5-rtc");

	if (!np)
@@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
		return;

	match = of_match_node(matches, root);
-	of_node_put(root);

	if (match) {
		void (*fixup)(struct device_node *) = match->data;


@@ -7979,7 +7979,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
-	if (mddev->in_sync || !mddev->sync_checkers) {
+	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
@@ -8639,6 +8639,9 @@ void md_check_recovery(struct mddev *mddev)
	if (mddev_trylock(mddev)) {
		int spares = 0;

+		if (!mddev->external && mddev->safemode == 1)
+			mddev->safemode = 0;
+
		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)


@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},	/* Sierra Wireless MC7700 */
	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */


@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)

	dino_dev->hba.dev = dev;
	dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
-	dino_dev->hba.lmmio_space_offset = 0;	/* CPU addrs == bus addrs */
+	dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
	spin_lock_init(&dino_dev->dinosaur_pen);
	dino_dev->hba.iommu = ccio_get_iommu(dev);


@@ -127,6 +127,22 @@ out:
  */
 #define USB_ACPI_LOCATION_VALID (1 << 31)

+static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
+					      int raw)
+{
+	struct acpi_device *adev;
+
+	if (!parent)
+		return NULL;
+
+	list_for_each_entry(adev, &parent->children, node) {
+		if (acpi_device_adr(adev) == raw)
+			return adev;
+	}
+
+	return acpi_find_child_device(parent, raw, false);
+}
+
 static struct acpi_device *usb_acpi_find_companion(struct device *dev)
 {
	struct usb_device *udev;
@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
			int raw;

			raw = usb_hcd_find_raw_port_number(hcd, port1);
-			adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
-					raw, false);
+
+			adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
+						  raw);
+
			if (!adev)
				return NULL;
		} else {
@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
				return NULL;

			acpi_bus_get_device(parent_handle, &adev);
-			adev = acpi_find_child_device(adev, port1, false);
+
+			adev = usb_acpi_find_port(adev, port1);
+
			if (!adev)
				return NULL;
		}


@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));

-	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
	/*
	 * XXX: Add support for merging bio_vec when using different page
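The new check requires strict frame contiguity: the first chunk must end exactly where the second begins in bus-frame space. A small worked example with made-up numbers (a sketch assuming 4 KiB frames; PFN_DOWN_EX stands in for the kernel's PFN_DOWN):

    /* vec1 starts on frame 100 at offset 3072 with length 1024, so it
     * ends exactly at the start of frame 101; vec2 merges only if its
     * first frame is 101. */
    #define PFN_DOWN_EX(x)  ((x) >> 12)     /* illustrative, 4 KiB */

    static int mergeable(unsigned long bfn1, unsigned long off1,
                         unsigned long len1, unsigned long bfn2)
    {
            return bfn1 + PFN_DOWN_EX(off1 + len1) == bfn2;
    }
    /* mergeable(100, 3072, 1024, 101) == 1; mergeable(100, 0, 1024, 101) == 0 */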


@@ -666,8 +666,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
 {
	unsigned long random_variable = 0;

-	if ((current->flags & PF_RANDOMIZE) &&
-		!(current->personality & ADDR_NO_RANDOMIZE)) {
+	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;


@@ -65,6 +65,7 @@ extern bool movable_node_enabled;
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 #define __init_memblock __meminit
 #define __initdata_memblock __meminitdata
+void memblock_discard(void);
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -78,8 +79,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -114,6 +113,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.


@@ -155,6 +155,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
 #define sysctl_softlockup_all_cpu_backtrace 0
 #define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+    defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
 extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,


@@ -310,8 +310,8 @@ struct pmu {
	 * Notification that the event was mapped or unmapped. Called
	 * in the context of the mapping task.
	 */
-	void (*event_mapped)	(struct perf_event *event); /*optional*/
-	void (*event_unmapped)	(struct perf_event *event); /*optional*/
+	void (*event_mapped)	(struct perf_event *event, struct mm_struct *mm); /* optional */
+	void (*event_unmapped)	(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/ ->start()/->stop(). There are


@@ -8,7 +8,9 @@ enum pid_type
	PIDTYPE_PID,
	PIDTYPE_PGID,
	PIDTYPE_SID,
-	PIDTYPE_MAX
+	PIDTYPE_MAX,
+	/* only valid to __task_pid_nr_ns() */
+	__PIDTYPE_TGID
 };

 /*


@@ -1132,13 +1132,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
	return tsk->tgid;
 }

-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
-	return pid_vnr(task_tgid(tsk));
-}
-
 /**
  * pid_alive - check that a task structure is not stale
  * @p: Task structure to be checked.
@@ -1154,23 +1147,6 @@ static inline int pid_alive(const struct task_struct *p)
	return p->pids[PIDTYPE_PID].pid != NULL;
 }

-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
-	pid_t pid = 0;
-
-	rcu_read_lock();
-	if (pid_alive(tsk))
-		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
-	rcu_read_unlock();
-
-	return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
-	return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
@@ -1192,6 +1168,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }

+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+	pid_t pid = 0;
+
+	rcu_read_lock();
+	if (pid_alive(tsk))
+		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+	rcu_read_unlock();
+
+	return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+	return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
 /* Obsolete, do not use: */
 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 {


@@ -457,14 +457,16 @@ void audit_remove_watch_rule(struct audit_krule *krule)

	list_del(&krule->rlist);

	if (list_empty(&watch->rules)) {
-		audit_remove_watch(watch);
-
-		if (list_empty(&parent->watches)) {
-			audit_get_parent(parent);
-			fsnotify_destroy_mark(&parent->mark, audit_watch_group);
-			audit_put_parent(parent);
-		}
+		/*
+		 * audit_remove_watch() drops our reference to 'parent' which
+		 * can get freed. Grab our own reference to be safe.
+		 */
+		audit_get_parent(parent);
+		audit_remove_watch(watch);
+		if (list_empty(&parent->watches))
+			fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+		audit_put_parent(parent);
	}
 }

 /* Update watch data in audit rules based on fsnotify events. */


@@ -5084,7 +5084,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
		atomic_inc(&event->rb->aux_mmap_count);

	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);
 }

 static void perf_pmu_output_stop(struct perf_event *event);
@@ -5107,7 +5107,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
	unsigned long size = perf_data_size(rb);

	if (event->pmu->event_unmapped)
-		event->pmu->event_unmapped(event);
+		event->pmu->event_unmapped(event, vma->vm_mm);

	/*
	 * rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5405,7 +5405,7 @@ aux_unlock:
	vma->vm_ops = &perf_mmap_vmops;

	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);

	return ret;
 }


@@ -898,13 +898,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
-	unsigned long flags;
+	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	irq_settings_clr_and_set(desc, clr, set);

+	trigger = irqd_get_trigger_type(&desc->irq_data);
+
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
@@ -916,7 +918,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

-	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+	tmp = irq_settings_get_trigger_mask(desc);
+	if (tmp != IRQ_TYPE_NONE)
+		trigger = tmp;
+
+	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
 }


@@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;

-	if (!data || !ipimask || cpu > nr_cpu_ids)
+	if (!data || !ipimask || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	if (!cpumask_test_cpu(cpu, ipimask))
@@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
	if (!chip->ipi_send_single && !chip->ipi_send_mask)
		return -EINVAL;

-	if (cpu > nr_cpu_ids)
+	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (dest) {

@@ -527,8 +527,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
	if (!ns)
		ns = task_active_pid_ns(current);
	if (likely(pid_alive(task))) {
-		if (type != PIDTYPE_PID)
+		if (type != PIDTYPE_PID) {
+			if (type == __PIDTYPE_TGID)
+				type = PIDTYPE_PID;
			task = task->group_leader;
+		}
		nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
	}
	rcu_read_unlock();
@@ -537,12 +540,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
 }
 EXPORT_SYMBOL(__task_pid_nr_ns);

-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
-	return pid_nr_ns(task_tgid(tsk), ns);
-}
-EXPORT_SYMBOL(task_tgid_nr_ns);
-
 struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
 {
	return ns_of_pid(task_pid(tsk));
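Taken together with the sched.h hunk above, the pseudo-type keeps the namespace-aware lookup in one place: __PIDTYPE_TGID simply means "the group leader's PIDTYPE_PID entry", resolved under the same RCU read lock. A hedged sketch of how the wrappers now funnel through the one helper:

    /* Sketch, mirroring the inline wrappers added in sched.h above:
     * both tgid helpers become thin calls into __task_pid_nr_ns(),
     * which swaps __PIDTYPE_TGID for PIDTYPE_PID after hopping to
     * task->group_leader. */
    pid_t tgid_ns  = __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);   /* task_tgid_nr_ns() */
    pid_t tgid_cur = __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); /* task_tgid_vnr()   */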


@@ -161,6 +161,7 @@ static void set_sample_period(void)
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+	watchdog_update_hrtimer_threshold(sample_period);
 }

 /* Commands for resetting the watchdog */


@@ -70,6 +70,62 @@ void touch_nmi_watchdog(void)
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);

+#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
+static DEFINE_PER_CPU(ktime_t, last_timestamp);
+static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
+static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
+
+void watchdog_update_hrtimer_threshold(u64 period)
+{
+	/*
+	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
+	 *
+	 * So it runs effectively with 2.5 times the rate of the NMI
+	 * watchdog. That means the hrtimer should fire 2-3 times before
+	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
+	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
+	 * might run way faster than expected and the NMI fires in a
+	 * smaller period than the one deduced from the nominal CPU
+	 * frequency. Depending on the Turbo-Mode factor this might be fast
+	 * enough to get the NMI period smaller than the hrtimer watchdog
+	 * period and trigger false positives.
+	 *
+	 * The sample threshold is used to check in the NMI handler whether
+	 * the minimum time between two NMI samples has elapsed. That
+	 * prevents false positives.
+	 *
+	 * Set this to 4/5 of the actual watchdog threshold period so the
+	 * hrtimer is guaranteed to fire at least once within the real
+	 * watchdog threshold.
+	 */
+	watchdog_hrtimer_sample_threshold = period * 2;
+}
+
+static bool watchdog_check_timestamp(void)
+{
+	ktime_t delta, now = ktime_get_mono_fast_ns();
+
+	delta = now - __this_cpu_read(last_timestamp);
+	if (delta < watchdog_hrtimer_sample_threshold) {
+		/*
+		 * If ktime is jiffies based, a stalled timer would prevent
+		 * jiffies from being incremented and the filter would look
+		 * at a stale timestamp and never trigger.
+		 */
+		if (__this_cpu_inc_return(nmi_rearmed) < 10)
+			return false;
+	}
+	__this_cpu_write(nmi_rearmed, 0);
+	__this_cpu_write(last_timestamp, now);
+	return true;
+}
+#else
+static inline bool watchdog_check_timestamp(void)
+{
+	return true;
+}
+#endif
+
 static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
@@ -94,6 +150,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
		return;
	}

+	if (!watchdog_check_timestamp())
+		return;
+
	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing. The timer interrupt should have
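The constants in the comment above can be checked with a little arithmetic. With the default watchdog_thresh of 10 seconds and the softlockup threshold being twice that (the kernel default), set_sample_period() yields a 4-second hrtimer period, so period * 2 puts the NMI sample threshold at 8 seconds, i.e. 4/5 of the 10-second NMI window. A standalone sketch, assuming those defaults:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed defaults: watchdog_thresh = 10 s,
             * softlockup thresh = 2 * watchdog_thresh */
            unsigned long long nsec_per_sec = 1000000000ULL;
            unsigned long long watchdog_thresh = 10;
            unsigned long long sample_period =
                    (2 * watchdog_thresh) * (nsec_per_sec / 5); /* 4 s */
            unsigned long long nmi_threshold = sample_period * 2; /* 8 s */

            printf("hrtimer period %llu ns, NMI threshold %llu ns\n",
                   sample_period, nmi_threshold);
            return 0;
    }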


@@ -344,6 +344,13 @@ config SECTION_MISMATCH_WARN_ONLY

	  If unsure, say Y.

+#
+# Enables a timestamp based low pass filter to compensate for perf based
+# hard lockup detection which runs too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+	bool
+
 #
 # Select this config option from the architecture Kconfig, if it
 # is preferred to always offer frame pointers as a config


@@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
	char name[16];
	int u32s;

-	sprintf(name, "cma-%s", cma->name);
+	scnprintf(name, sizeof(name), "cma-%s", cma->name);

	tmp = debugfs_create_dir(name, cma_debugfs_root);


@@ -288,31 +288,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 }

 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
-					phys_addr_t *addr)
+/**
+ * Discard memory and reserved arrays if they were allocated
+ */
+void __init memblock_discard(void)
 {
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	*addr = __pa(memblock.reserved.regions);
-
-	return PAGE_ALIGN(sizeof(struct memblock_region) *
-			  memblock.reserved.max);
-}
-
-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
-					phys_addr_t *addr)
-{
-	if (memblock.memory.regions == memblock_memory_init_regions)
-		return 0;
-
-	*addr = __pa(memblock.memory.regions);
-
-	return PAGE_ALIGN(sizeof(struct memblock_region) *
-			  memblock.memory.max);
+	phys_addr_t addr, size;
+
+	if (memblock.reserved.regions != memblock_reserved_init_regions) {
+		addr = __pa(memblock.reserved.regions);
+		size = PAGE_ALIGN(sizeof(struct memblock_region) *
+				  memblock.reserved.max);
+		__memblock_free_late(addr, size);
+	}
+
+	if (memblock.memory.regions == memblock_memory_init_regions) {
+		addr = __pa(memblock.memory.regions);
+		size = PAGE_ALIGN(sizeof(struct memblock_region) *
+				  memblock.memory.max);
+		__memblock_free_late(addr, size);
+	}
 }
 #endif
/** /**


@@ -3882,8 +3882,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
	 * further.
	 */
	if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
-				&& test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
+				&& test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
+
+		/*
+		 * We are going to enforce SIGBUS but the PF path might have
+		 * dropped the mmap_sem already so take it again so that
+		 * we do not break expectations of all arch specific PF paths
+		 * and g-u-p
+		 */
+		if (ret & VM_FAULT_RETRY)
+			down_read(&vma->vm_mm->mmap_sem);
		ret = VM_FAULT_SIGBUS;
+	}

	return ret;
 }


@@ -931,11 +931,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

-	if (vma) {
-		up_read(&current->mm->mmap_sem);
-		vma = NULL;
-	}
-
	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {


@@ -41,6 +41,7 @@
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>
+#include <linux/ptrace.h>

 #include <asm/tlbflush.h>

@@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
 {
-	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
@@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,

	/*
	 * Check if this process has the right to modify the specified
-	 * process. The right exists if the process has administrative
-	 * capabilities, superuser privileges or the same
-	 * userid as the target process.
+	 * process. Use the regular "ptrace_may_access()" checks.
	 */
-	tcred = __task_cred(task);
-	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-	    !capable(CAP_SYS_NICE)) {
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;


@@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void)
				NULL)
		count += __free_memory_core(start, end);

-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-	{
-		phys_addr_t size;
-
-		/* Free memblock.reserved array if it was allocated */
-		size = get_allocated_memblock_reserved_regions_info(&start);
-		if (size)
-			count += __free_memory_core(start, start + size);
-
-		/* Free memblock.memory array if it was allocated */
-		size = get_allocated_memblock_memory_regions_info(&start);
-		if (size)
-			count += __free_memory_core(start, start + size);
-	}
-#endif
-
	return count;
 }


@@ -1582,6 +1582,10 @@ void __init page_alloc_init_late(void)
	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
 #endif
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+	/* Discard memblock private memory */
+	memblock_discard();
+#endif

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);


@@ -5637,13 +5637,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
		 * A cache is never shut down before deactivation is
		 * complete, so no need to worry about synchronization.
		 */
-		return;
+		goto out;

 #ifdef CONFIG_MEMCG
	kset_unregister(s->memcg_kset);
 #endif
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
+out:
	kobject_put(&s->kobj);
 }


@@ -1669,7 +1669,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-	const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN;
+	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
+					0 :
+					__GFP_HIGHMEM;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));
@@ -1677,7 +1680,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
+		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
				PAGE_KERNEL, node, area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
@@ -1698,9 +1701,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
		}

		if (node == NUMA_NO_NODE)
-			page = alloc_page(alloc_mask);
+			page = alloc_page(alloc_mask|highmem_mask);
		else
-			page = alloc_pages_node(node, alloc_mask, 0);
+			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1708,7 +1711,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
			goto fail;
		}
		area->pages[i] = page;
-		if (gfpflags_allow_blocking(gfp_mask))
+		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
			cond_resched();
	}


@@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
 static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
 {
	struct snd_seq_queue_info *info = arg;
-	int result;
	struct snd_seq_queue *q;

-	result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
-	if (result < 0)
-		return result;
-
-	q = queueptr(result);
-	if (q == NULL)
-		return -EINVAL;
+	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+	if (IS_ERR(q))
+		return PTR_ERR(q);

	info->queue = q->queue;
	info->locked = q->locked;
@@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
	if (!info->name[0])
		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
	strlcpy(q->name, info->name, sizeof(q->name));
-	queuefree(q);
+	snd_use_lock_free(&q->use_lock);

	return 0;
 }


@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
 static void queue_use(struct snd_seq_queue *queue, int client, int use);

 /* allocate a new queue -
- * return queue index value or negative value for error
+ * return pointer to new queue or ERR_PTR(-errno) for error
+ * The new queue's use_lock is set to 1. It is the caller's responsibility to
+ * call snd_use_lock_free(&q->use_lock).
 */
-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
 {
	struct snd_seq_queue *q;

	q = queue_new(client, locked);
	if (q == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
	q->info_flags = info_flags;
	queue_use(q, client, 1);
+	snd_use_lock_use(&q->use_lock);
	if (queue_list_add(q) < 0) {
+		snd_use_lock_free(&q->use_lock);
		queue_delete(q);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
	}
-	return q->queue;
+	return q;
 }

 /* delete a queue - queue must be owned by the client */
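Under the new contract the caller receives the queue with use_lock already held, closing the window where another client could delete the queue between allocation and first use. A sketch of the caller pattern, mirroring the ioctl hunk above:

    /* Sketch of the new caller contract: the queue arrives use-locked;
     * release the use lock once done touching it. */
    q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
    if (IS_ERR(q))
            return PTR_ERR(q);
    /* ... read q->queue, q->locked, set q->name ... */
    snd_use_lock_free(&q->use_lock);
    return 0;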


@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);

 /* create new queue (constructor) */
-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);

 /* delete queue (destructor) */
 int snd_seq_queue_delete(int client, int queueid);


@@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,

	if (size < sizeof(scale))
		return -ENOMEM;
+	if (cval->min_mute)
+		scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
	scale[2] = cval->dBmin;
	scale[3] = cval->dBmax;
	if (copy_to_user(_tlv, scale, sizeof(scale)))


@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
	int cached;
	int cache_val[MAX_CHANNELS];
	u8 initialized;
+	u8 min_mute;
	void *private_data;
 };


@@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
		if (unitid == 7 && cval->control == UAC_FU_VOLUME)
			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
		break;
+	/* lowest playback value is muted on C-Media devices */
+	case USB_ID(0x0d8c, 0x000c):
+	case USB_ID(0x0d8c, 0x0014):
+		if (strstr(kctl->id.name, "Playback"))
+			cval->min_mute = 1;
+		break;
	}
 }


@@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
	case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
@@ -1374,6 +1375,10 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
		}
	}
	break;
+
+	case USB_ID(0x16d0, 0x0a23):
+		if (fp->altsetting == 2)
+			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+		break;

	default:
		break;