updating to mainline 4.14.4

Jake Day 2017-12-05 10:23:31 -05:00
parent d5981a30a7
commit 634960e0ed
112 changed files with 966 additions and 442 deletions

View file

@@ -34,6 +34,10 @@ Required properties:
 - reg: I2C address
 
+Optional properties:
+- smbus-timeout-disable: When set, the smbus timeout function will be disabled.
+                         This is not supported on all chips.
+
 Example:
 
 	temp-sensor@1a {

View file

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 3
+SUBLEVEL = 4
 EXTRAVERSION =
 NAME = Petit Gorille

View file

@@ -77,9 +77,6 @@ endif
 
 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
 KBUILD_LDFLAGS_MODULE	+= -T $(srctree)/arch/arm64/kernel/module.lds
-ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
-KBUILD_LDFLAGS_MODULE	+= $(objtree)/arch/arm64/kernel/ftrace-mod.o
-endif
 endif
 
 # Default value

View file

@@ -32,7 +32,7 @@ struct mod_arch_specific {
 	struct mod_plt_sec	init;
 
 	/* for CONFIG_DYNAMIC_FTRACE */
-	void			*ftrace_trampoline;
+	struct plt_entry	*ftrace_trampoline;
 };
 #endif
 
@@ -45,4 +45,48 @@ extern u64 module_alloc_base;
 #define module_alloc_base	((u64)_etext - MODULES_VSIZE)
 #endif
 
+struct plt_entry {
+	/*
+	 * A program that conforms to the AArch64 Procedure Call Standard
+	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
+	 * IP1 (x17) may be inserted at any branch instruction that is
+	 * exposed to a relocation that supports long branches. Since that
+	 * is exactly what we are dealing with here, we are free to use x16
+	 * as a scratch register in the PLT veneers.
+	 */
+	__le32	mov0;	/* movn	x16, #0x....			*/
+	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
+	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
+	__le32	br;	/* br	x16				*/
+};
+
+static inline struct plt_entry get_plt_entry(u64 val)
+{
+	/*
+	 * MOVK/MOVN/MOVZ opcode:
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
+	 * +--------+------------+--------+-----------+-------------+---------+
+	 *
+	 * Rd  := 0x10 (x16)
+	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
+	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
+	 * sf  := 1 (64-bit variant)
+	 */
+	return (struct plt_entry){
+		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
+		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
+		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
+		cpu_to_le32(0xd61f0200)
+	};
+}
+
+static inline bool plt_entries_equal(const struct plt_entry *a,
+				     const struct plt_entry *b)
+{
+	return a->mov0 == b->mov0 &&
+	       a->mov1 == b->mov1 &&
+	       a->mov2 == b->mov2;
+}
+
 #endif /* __ASM_MODULE_H */
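For context, the following standalone user-space sketch (not part of the commit) reproduces what the veneer built by get_plt_entry() computes: MOVN writes the inverted low 16 bits of the target and fills bits 63:16 with ones (module addresses sit in the upper half of the kernel address space, so bits 63:48 remain 0xffff), and the two MOVKs then patch in bits 31:16 and 47:32. The sample address is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff000012345678ULL;	/* hypothetical module address */

	/* same bit-packing as get_plt_entry() */
	uint32_t mov0 = 0x92800010u | (uint32_t)(((~val      ) & 0xffff) << 5);
	uint32_t mov1 = 0xf2a00010u | (uint32_t)((( val >> 16) & 0xffff) << 5);
	uint32_t mov2 = 0xf2c00010u | (uint32_t)((( val >> 32) & 0xffff) << 5);

	/* emulate what the three instructions leave in x16 */
	uint64_t x16 = ~(uint64_t)((mov0 >> 5) & 0xffff);			/* movn */
	x16 = (x16 & ~0xffff0000ULL) |
	      ((uint64_t)((mov1 >> 5) & 0xffff) << 16);				/* movk, lsl #16 */
	x16 = (x16 & ~0xffff00000000ULL) |
	      ((uint64_t)((mov2 >> 5) & 0xffff) << 32);				/* movk, lsl #32 */

	printf("%d\n", x16 == val);	/* prints 1 */
	return 0;
}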

View file

@@ -63,6 +63,3 @@ extra-y					+= $(head-y) vmlinux.lds
 ifeq ($(CONFIG_DEBUG_EFI),y)
 AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
 endif
-
-# will be included by each individual module but not by the core kernel itself
-extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o

View file

@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-	.section	".text.ftrace_trampoline", "ax"
-	.align		3
-0:	.quad		0
-__ftrace_trampoline:
-	ldr		x16, 0b
-	br		x16
-ENDPROC(__ftrace_trampoline)

View file

@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (offset < -SZ_128M || offset >= SZ_128M) {
 #ifdef CONFIG_ARM64_MODULE_PLTS
-		unsigned long *trampoline;
+		struct plt_entry trampoline;
 		struct module *mod;
 
 		/*
@@ -104,22 +104,24 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * is added in the future, but for now, the pr_err() below
 		 * deals with a theoretical issue only.
 		 */
-		trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
-		if (trampoline[0] != addr) {
-			if (trampoline[0] != 0) {
+		trampoline = get_plt_entry(addr);
+		if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+				       &trampoline)) {
+			if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+					       &(struct plt_entry){})) {
 				pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
 				return -EINVAL;
 			}
 
 			/* point the trampoline to our ftrace entry point */
 			module_disable_ro(mod);
-			trampoline[0] = addr;
+			*mod->arch.ftrace_trampoline = trampoline;
 			module_enable_ro(mod, true);
 
 			/* update trampoline before patching in the branch */
 			smp_wmb();
 		}
-		addr = (unsigned long)&trampoline[1];
+		addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
 #else /* CONFIG_ARM64_MODULE_PLTS */
 		return -EINVAL;
 #endif /* CONFIG_ARM64_MODULE_PLTS */
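A quick standalone illustration (not from the commit) of the range test above: an arm64 BL instruction carries a signed 26-bit immediate scaled by 4, so a direct call can only reach about 128 MiB either side of the call site; anything farther has to bounce through the module's trampoline.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_128M (128 * 1024 * 1024LL)

/* same shape as the range check in ftrace_make_call() */
static bool needs_trampoline(uint64_t pc, uint64_t addr)
{
	int64_t offset = (int64_t)(addr - pc);

	return offset < -SZ_128M || offset >= SZ_128M;
}

int main(void)
{
	/* a target 1 GiB away needs the veneer; 1 MiB away does not */
	printf("%d %d\n", needs_trampoline(0, 1ULL << 30),
			  needs_trampoline(0, 1ULL << 20));	/* 1 0 */
	return 0;
}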

View file

@@ -11,21 +11,6 @@
 #include <linux/module.h>
 #include <linux/sort.h>
 
-struct plt_entry {
-	/*
-	 * A program that conforms to the AArch64 Procedure Call Standard
-	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
-	 * IP1 (x17) may be inserted at any branch instruction that is
-	 * exposed to a relocation that supports long branches. Since that
-	 * is exactly what we are dealing with here, we are free to use x16
-	 * as a scratch register in the PLT veneers.
-	 */
-	__le32	mov0;	/* movn	x16, #0x....			*/
-	__le32	mov1;	/* movk	x16, #0x...., lsl #16		*/
-	__le32	mov2;	/* movk	x16, #0x...., lsl #32		*/
-	__le32	br;	/* br	x16				*/
-};
-
 static bool in_init(const struct module *mod, void *loc)
 {
 	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
@@ -40,33 +25,14 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 	int i = pltsec->plt_num_entries;
 	u64 val = sym->st_value + rela->r_addend;
 
-	/*
-	 * MOVK/MOVN/MOVZ opcode:
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
-	 * +--------+------------+--------+-----------+-------------+---------+
-	 *
-	 * Rd  := 0x10 (x16)
-	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
-	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
-	 * sf  := 1 (64-bit variant)
-	 */
-	plt[i] = (struct plt_entry){
-		cpu_to_le32(0x92800010 | (((~val      ) & 0xffff)) << 5),
-		cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
-		cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
-		cpu_to_le32(0xd61f0200)
-	};
+	plt[i] = get_plt_entry(val);
 
 	/*
 	 * Check if the entry we just created is a duplicate. Given that the
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (i > 0 &&
-	    plt[i].mov0 == plt[i - 1].mov0 &&
-	    plt[i].mov1 == plt[i - 1].mov1 &&
-	    plt[i].mov2 == plt[i - 1].mov2)
+	if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
 		return (u64)&plt[i - 1];
 
 	pltsec->plt_num_entries++;
@@ -154,6 +120,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	unsigned long core_plts = 0;
 	unsigned long init_plts = 0;
 	Elf64_Sym *syms = NULL;
+	Elf_Shdr *tramp = NULL;
 	int i;
 
 	/*
@@ -165,6 +132,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			mod->arch.core.plt = sechdrs + i;
 		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
 			mod->arch.init.plt = sechdrs + i;
+		else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+			 !strcmp(secstrings + sechdrs[i].sh_name,
+				 ".text.ftrace_trampoline"))
+			tramp = sechdrs + i;
 		else if (sechdrs[i].sh_type == SHT_SYMTAB)
 			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
 	}
@@ -215,5 +186,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	mod->arch.init.plt_num_entries = 0;
 	mod->arch.init.plt_max_entries = init_plts;
 
+	if (tramp) {
+		tramp->sh_type = SHT_NOBITS;
+		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+		tramp->sh_addralign = __alignof__(struct plt_entry);
+		tramp->sh_size = sizeof(struct plt_entry);
+	}
+
 	return 0;
 }

View file

@@ -1,4 +1,5 @@
 SECTIONS {
 	.plt (NOLOAD) : { BYTE(0) }
 	.init.plt (NOLOAD) : { BYTE(0) }
+	.text.ftrace_trampoline (NOLOAD) : { BYTE(0) }
 }

View file

@@ -623,7 +623,9 @@ BEGIN_FTR_SECTION
 	 * NOTE, we rely on r0 being 0 from above.
 	 */
 	mtspr	SPRN_IAMR,r0
+BEGIN_FTR_SECTION_NESTED(42)
 	mtspr	SPRN_AMOR,r0
+END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 	/* save regs for local vars on new stack.

View file

@@ -47,7 +47,8 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+static inline unsigned long ___tlbie(unsigned long vpn, int psize,
+						int apsize, int ssize)
 {
 	unsigned long va;
 	unsigned int penc;
@@ -100,7 +101,15 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 		: "memory");
 		break;
 	}
-	trace_tlbie(0, 0, va, 0, 0, 0, 0);
+	return va;
+}
+
+static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+{
+	unsigned long rb;
+
+	rb = ___tlbie(vpn, psize, apsize, ssize);
+	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
 }
 
 static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -652,7 +661,7 @@ static void native_hpte_clear(void)
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
 			hptep->v = 0;
-			__tlbie(vpn, psize, apsize, ssize);
+			___tlbie(vpn, psize, apsize, ssize);
 		}
 	}

View file

@@ -194,13 +194,14 @@ struct arch_elf_state {
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
-						    0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  64-bit
+   tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+				(STACK_TOP / 3 * 2) : \
+				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */

View file

@@ -51,19 +51,15 @@ ENTRY(native_usergs_sysret64)
 END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
-.macro TRACE_IRQS_FLAGS flags:req
+.macro TRACE_IRQS_IRETQ
 #ifdef CONFIG_TRACE_IRQFLAGS
-	bt	$9, \flags		/* interrupts off? */
+	bt	$9, EFLAGS(%rsp)	/* interrupts off? */
 	jnc	1f
 	TRACE_IRQS_ON
 1:
 #endif
 .endm
 
-.macro TRACE_IRQS_IRETQ
-	TRACE_IRQS_FLAGS EFLAGS(%rsp)
-.endm
-
 /*
  * When dynamic function tracer is enabled it will add a breakpoint
  * to all locations that it is about to modify, sync CPUs, update
@@ -927,13 +923,11 @@ ENTRY(native_load_gs_index)
 	FRAME_BEGIN
 	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
-	TRACE_IRQS_OFF
 	SWAPGS
 .Lgs_change:
 	movl	%edi, %gs
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	SWAPGS
-	TRACE_IRQS_FLAGS (%rsp)
 	popfq
 	FRAME_END
 	ret

View file

@@ -1093,6 +1093,12 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+	return pud_flags(pud) & _PAGE_RW;
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *

View file

@@ -266,9 +266,14 @@ static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 	recalculate_apic_map(apic->vcpu->kvm);
 }
 
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+	return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
+
 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 {
-	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+	u32 ldr = kvm_apic_calc_x2apic_ldr(id);
 
 	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
@@ -2196,6 +2201,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 {
 	if (apic_x2apic_mode(vcpu->arch.apic)) {
 		u32 *id = (u32 *)(s->regs + APIC_ID);
+		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 
 		if (vcpu->kvm->arch.x2apic_format) {
 			if (*id != vcpu->vcpu_id)
@@ -2206,6 +2212,10 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
 			else
 				*id <<= 24;
 		}
+
+		/* In x2APIC mode, the LDR is fixed and based on the id */
+		if (set)
+			*ldr = kvm_apic_calc_x2apic_ldr(*id);
 	}
 
 	return 0;
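The derived LDR value is easy to check by hand; below is a standalone sketch (not kernel code) of kvm_apic_calc_x2apic_ldr(): the upper half selects the cluster (id >> 4) and the lower half is a one-hot bit for the position within the cluster (id & 0xf).

#include <stdint.h>
#include <stdio.h>

static uint32_t calc_x2apic_ldr(uint32_t id)
{
	return ((id >> 4) << 16) | (1u << (id & 0xf));
}

int main(void)
{
	printf("%#x\n", calc_x2apic_ldr(5));	/* 0x20: cluster 0, bit 5 */
	printf("%#x\n", calc_x2apic_ldr(35));	/* 0x20008: cluster 2, bit 3 */
	return 0;
}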

View file

@@ -2189,6 +2189,8 @@ static int ud_interception(struct vcpu_svm *svm)
 	int er;
 
 	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+	if (er == EMULATE_USER_EXIT)
+		return 0;
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;

View file

@@ -5914,6 +5914,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+	if (er == EMULATE_USER_EXIT)
+		return 0;
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;

View file

@@ -1830,6 +1830,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
 	 */
 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
 
+	if (guest_hv_clock.version & 1)
+		++guest_hv_clock.version;  /* first time write, random junk */
+
 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
 				&vcpu->hv_clock,
@@ -5705,6 +5708,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
 					emulation_type))
 			return EMULATE_DONE;
+		if (ctxt->have_exception && inject_emulated_exception(vcpu))
+			return EMULATE_DONE;
 		if (emulation_type & EMULTYPE_SKIP)
 			return EMULATE_FAIL;
 		return handle_emulation_failure(vcpu);
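For background on the version fixup above: the pvclock version field implements a seqlock-style protocol in which an odd value means an update is in flight. The sketch below is not kernel code (the structure is reduced to two fields), but it shows the guest-side read loop that the odd/even fix keeps honest.

#include <stdint.h>

struct pvclock_like {
	volatile uint32_t version;	/* odd = host update in progress */
	uint64_t tsc_timestamp;
};

static uint64_t read_stable(const struct pvclock_like *p)
{
	uint32_t v;
	uint64_t ts;

	do {
		while ((v = p->version) & 1)
			;		/* writer active, spin */
		ts = p->tsc_timestamp;
	} while (p->version != v);	/* version moved: possible torn read, retry */

	return ts;
}

int main(void)
{
	static struct pvclock_like p = { .version = 2, .tsc_timestamp = 42 };

	return read_stable(&p) == 42 ? 0 : 1;
}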

View file

@@ -1047,6 +1047,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
+/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+	struct sock *sk = areq->sk;
+
+	af_alg_free_areq_sgls(areq);
+	sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
 /**
  * af_alg_async_cb - AIO callback handler
  *
@@ -1063,18 +1075,13 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 	struct kiocb *iocb = areq->iocb;
 	unsigned int resultlen;
 
-	lock_sock(sk);
-
 	/* Buffer size written by crypto operation. */
 	resultlen = areq->outlen;
 
-	af_alg_free_areq_sgls(areq);
-	sock_kfree_s(sk, areq, areq->areqlen);
-	__sock_put(sk);
+	af_alg_free_resources(areq);
+	sock_put(sk);
 
 	iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
-	release_sock(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);

View file

@@ -101,10 +101,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct aead_tfm *aeadc = pask->private;
 	struct crypto_aead *tfm = aeadc->aead;
 	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
-	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int i, as = crypto_aead_authsize(tfm);
 	struct af_alg_async_req *areq;
-	struct af_alg_tsgl *tsgl;
-	struct scatterlist *src;
+	struct af_alg_tsgl *tsgl, *tmp;
+	struct scatterlist *rsgl_src, *tsgl_src = NULL;
 	int err = 0;
 	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
 	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
@@ -178,7 +178,22 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 	processed = used + ctx->aead_assoclen;
-	tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+		for (i = 0; i < tsgl->cur; i++) {
+			struct scatterlist *process_sg = tsgl->sg + i;
+
+			if (!(process_sg->length) || !sg_page(process_sg))
+				continue;
+			tsgl_src = process_sg;
+			break;
+		}
+		if (tsgl_src)
+			break;
+	}
+	if (processed && !tsgl_src) {
+		err = -EFAULT;
+		goto free;
+	}
 
 	/*
 	 * Copy of AAD from source to destination
@@ -194,7 +209,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	 */
 
 	/* Use the RX SGL as source (and destination) for crypto op. */
-	src = areq->first_rsgl.sgl.sg;
+	rsgl_src = areq->first_rsgl.sgl.sg;
 
 	if (ctx->enc) {
 		/*
@@ -207,7 +222,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 *	    v	   v
 		 * RX SGL: AAD || PT || Tag
 		 */
-		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 					   areq->first_rsgl.sgl.sg, processed);
 		if (err)
 			goto free;
@@ -225,7 +240,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 */
 
 		/* Copy AAD || CT to RX SGL buffer for in-place operation. */
-		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 					   areq->first_rsgl.sgl.sg, outlen);
 		if (err)
 			goto free;
@@ -257,23 +272,34 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 				 areq->tsgl);
 	} else
 		/* no RX SGL present (e.g. authentication only) */
-		src = areq->tsgl;
+		rsgl_src = areq->tsgl;
 
 	/* Initialize the crypto operation */
-	aead_request_set_crypt(&areq->cra_u.aead_req, src,
+	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
 			       areq->first_rsgl.sgl.sg, used, ctx->iv);
 	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 
 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 		/* AIO operation */
+		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
 		aead_request_set_callback(&areq->cra_u.aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  af_alg_async_cb, areq);
 		err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
 				 crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+		/* AIO operation in progress */
+		if (err == -EINPROGRESS || err == -EBUSY) {
+			/* Remember output size that will be generated. */
+			areq->outlen = outlen;
+
+			return -EIOCBQUEUED;
+		}
+
+		sock_put(sk);
 	} else {
 		/* Synchronous operation */
 		aead_request_set_callback(&areq->cra_u.aead_req,
@@ -285,19 +311,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 					       &ctx->completion);
 	}
 
-	/* AIO operation in progress */
-	if (err == -EINPROGRESS) {
-		sock_hold(sk);
-
-		/* Remember output size that will be generated. */
-		areq->outlen = outlen;
-
-		return -EIOCBQUEUED;
-	}
-
 free:
-	af_alg_free_areq_sgls(areq);
-	sock_kfree_s(sk, areq, areq->areqlen);
+	af_alg_free_resources(areq);
 
 	return err ? err : outlen;
 }

View file

@@ -117,6 +117,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 
 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 		/* AIO operation */
+		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -124,6 +125,16 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		err = ctx->enc ?
 			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+		/* AIO operation in progress */
+		if (err == -EINPROGRESS || err == -EBUSY) {
+			/* Remember output size that will be generated. */
+			areq->outlen = len;
+
+			return -EIOCBQUEUED;
+		}
+
+		sock_put(sk);
 	} else {
 		/* Synchronous operation */
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
@@ -137,19 +148,9 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 						 &ctx->completion);
 	}
 
-	/* AIO operation in progress */
-	if (err == -EINPROGRESS) {
-		sock_hold(sk);
-
-		/* Remember output size that will be generated. */
-		areq->outlen = len;
-
-		return -EIOCBQUEUED;
-	}
-
 free:
-	af_alg_free_areq_sgls(areq);
-	sock_kfree_s(sk, areq, areq->areqlen);
+	af_alg_free_resources(areq);
 
 	return err ? err : len;
 }
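Both converted paths above follow the same lifetime rule. Here is a self-contained toy (not kernel code) of the discipline: take the reference before submitting, then release it on exactly one path, either the inline-completion branch or the async callback.

#include <stdio.h>

static int refs = 1;			/* stands in for the socket refcount */

static void hold(void) { refs++; }
static void put(void)  { refs--; }

/* returns 1 if the "request" completed asynchronously */
static int submit(int goes_async)
{
	hold();				/* ref owned by the in-flight request */
	if (goes_async)
		return 1;		/* the callback below will put() */
	put();				/* completed inline: drop it here */
	return 0;
}

static void async_callback(void)
{
	put();				/* the async path's single put() */
}

int main(void)
{
	if (submit(1))
		async_callback();
	submit(0);
	printf("%d\n", refs);		/* 1: balanced on both paths */
	return 0;
}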

View file

@@ -522,6 +522,9 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
 	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
 
+	scatterwalk_done(&walk->in, 0, walk->total);
+	scatterwalk_done(&walk->out, 0, walk->total);
+
 	walk->iv = req->iv;
 	walk->oiv = req->iv;

View file

@@ -1597,32 +1597,41 @@ static int acpi_ec_add(struct acpi_device *device)
 {
 	struct acpi_ec *ec = NULL;
 	int ret;
+	bool is_ecdt = false;
+	acpi_status status;
 
 	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
-	ec = acpi_ec_alloc();
-	if (!ec)
-		return -ENOMEM;
-	if (ec_parse_device(device->handle, 0, ec, NULL) !=
-	    AE_CTRL_TERMINATE) {
-		ret = -EINVAL;
-		goto err_alloc;
+	if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+		is_ecdt = true;
+		ec = boot_ec;
+	} else {
+		ec = acpi_ec_alloc();
+		if (!ec)
+			return -ENOMEM;
+		status = ec_parse_device(device->handle, 0, ec, NULL);
+		if (status != AE_CTRL_TERMINATE) {
+			ret = -EINVAL;
+			goto err_alloc;
+		}
 	}
 
 	if (acpi_is_boot_ec(ec)) {
-		boot_ec_is_ecdt = false;
-		/*
-		 * Trust PNP0C09 namespace location rather than ECDT ID.
-		 *
-		 * But trust ECDT GPE rather than _GPE because of ASUS quirks,
-		 * so do not change boot_ec->gpe to ec->gpe.
-		 */
-		boot_ec->handle = ec->handle;
-		acpi_handle_debug(ec->handle, "duplicated.\n");
-		acpi_ec_free(ec);
-		ec = boot_ec;
-		ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+		boot_ec_is_ecdt = is_ecdt;
+		if (!is_ecdt) {
+			/*
+			 * Trust PNP0C09 namespace location rather than
+			 * ECDT ID. But trust ECDT GPE rather than _GPE
+			 * because of ASUS quirks, so do not change
+			 * boot_ec->gpe to ec->gpe.
+			 */
+			boot_ec->handle = ec->handle;
+			acpi_handle_debug(ec->handle, "duplicated.\n");
+			acpi_ec_free(ec);
+			ec = boot_ec;
+		}
+		ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
 	} else
 		ret = acpi_ec_setup(ec, true);
 	if (ret)
@@ -1635,8 +1644,10 @@ static int acpi_ec_add(struct acpi_device *device)
 	ret = !!request_region(ec->command_addr, 1, "EC cmd");
 	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-	/* Reprobe devices depending on the EC */
-	acpi_walk_dep_device_list(ec->handle);
+	if (!is_ecdt) {
+		/* Reprobe devices depending on the EC */
+		acpi_walk_dep_device_list(ec->handle);
+	}
 	acpi_handle_debug(ec->handle, "enumerated.\n");
 	return 0;
 
@@ -1692,6 +1703,7 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
 
 static const struct acpi_device_id ec_device_ids[] = {
 	{"PNP0C09", 0},
+	{ACPI_ECDT_HID, 0},
 	{"", 0},
 };
 
@@ -1764,11 +1776,14 @@ static int __init acpi_ec_ecdt_start(void)
 	 * Note: ec->handle can be valid if this function is called after
 	 * acpi_ec_add(), hence the fast path.
 	 */
-	if (boot_ec->handle != ACPI_ROOT_OBJECT)
-		handle = boot_ec->handle;
-	else if (!acpi_ec_ecdt_get_handle(&handle))
-		return -ENODEV;
-	return acpi_config_boot_ec(boot_ec, handle, true, true);
+	if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+		if (!acpi_ec_ecdt_get_handle(&handle))
+			return -ENODEV;
+		boot_ec->handle = handle;
+	}
+
+	/* Register to ACPI bus with PM ops attached */
+	return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
 }
 
 #if 0
@@ -2020,6 +2035,12 @@ int __init acpi_ec_init(void)
 
 	/* Drivers must be started after acpi_ec_query_init() */
 	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+	/*
+	 * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+	 * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+	 * settings but invalid DSDT settings.
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+	 */
 	ecdt_fail = acpi_ec_ecdt_start();
 	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }

View file

@@ -115,6 +115,7 @@ bool acpi_device_is_present(const struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
 bool acpi_device_is_first_physical_node(struct acpi_device *adev,
 					const struct device *dev);
+int acpi_bus_register_early_device(int type);
 
 /* --------------------------------------------------------------------------
                      Device Matching and Notification

View file

@@ -1024,6 +1024,9 @@ static void acpi_device_get_busid(struct acpi_device *device)
 	case ACPI_BUS_TYPE_SLEEP_BUTTON:
 		strcpy(device->pnp.bus_id, "SLPF");
 		break;
+	case ACPI_BUS_TYPE_ECDT_EC:
+		strcpy(device->pnp.bus_id, "ECDT");
+		break;
 	default:
 		acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
 		/* Clean up trailing underscores (if any) */
@@ -1304,6 +1307,9 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
 	case ACPI_BUS_TYPE_SLEEP_BUTTON:
 		acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
 		break;
+	case ACPI_BUS_TYPE_ECDT_EC:
+		acpi_add_id(pnp, ACPI_ECDT_HID);
+		break;
 	}
 }
 
@@ -2049,6 +2055,21 @@ void acpi_bus_trim(struct acpi_device *adev)
 }
 EXPORT_SYMBOL_GPL(acpi_bus_trim);
 
+int acpi_bus_register_early_device(int type)
+{
+	struct acpi_device *device = NULL;
+	int result;
+
+	result = acpi_add_single_object(&device, NULL,
+					type, ACPI_STA_DEFAULT);
+	if (result)
+		return result;
+
+	device->flags.match_driver = true;
+	return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
 static int acpi_bus_scan_fixed(void)
 {
 	int result = 0;

View file

@@ -427,9 +427,21 @@ static int dev_dax_fault(struct vm_fault *vmf)
 	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
 
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+	struct file *filp = vma->vm_file;
+	struct dev_dax *dev_dax = filp->private_data;
+	struct dax_region *dax_region = dev_dax->region;
+
+	if (!IS_ALIGNED(addr, dax_region->align))
+		return -EINVAL;
+	return 0;
+}
+
 static const struct vm_operations_struct dax_vm_ops = {
 	.fault = dev_dax_fault,
 	.huge_fault = dev_dax_huge_fault,
+	.split = dev_dax_split,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
static int dax_mmap(struct file *filp, struct vm_area_struct *vma) static int dax_mmap(struct file *filp, struct vm_area_struct *vma)

View file

@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
  * @dst: the destination reservation object
  * @src: the source reservation object
  *
- * Copy all fences from src to dst. Both src->lock as well as dst-lock must be
- * held.
+ * Copy all fences from src to dst. dst-lock must be held.
  */
 int reservation_object_copy_fences(struct reservation_object *dst,
 				   struct reservation_object *src)
@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
 	size_t size;
 	unsigned i;
 
-	src_list = reservation_object_get_list(src);
+	rcu_read_lock();
+	src_list = rcu_dereference(src->fence);
 
+retry:
 	if (src_list) {
-		size = offsetof(typeof(*src_list),
-				shared[src_list->shared_count]);
+		unsigned shared_count = src_list->shared_count;
+
+		size = offsetof(typeof(*src_list), shared[shared_count]);
+		rcu_read_unlock();
+
 		dst_list = kmalloc(size, GFP_KERNEL);
 		if (!dst_list)
 			return -ENOMEM;
 
-		dst_list->shared_count = src_list->shared_count;
-		dst_list->shared_max = src_list->shared_count;
-		for (i = 0; i < src_list->shared_count; ++i)
-			dst_list->shared[i] =
-				dma_fence_get(src_list->shared[i]);
+		rcu_read_lock();
+		src_list = rcu_dereference(src->fence);
+		if (!src_list || src_list->shared_count > shared_count) {
+			kfree(dst_list);
+			goto retry;
+		}
+
+		dst_list->shared_count = 0;
+		dst_list->shared_max = shared_count;
+		for (i = 0; i < src_list->shared_count; ++i) {
+			struct dma_fence *fence;
+
+			fence = rcu_dereference(src_list->shared[i]);
+			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				     &fence->flags))
+				continue;
+
+			if (!dma_fence_get_rcu(fence)) {
+				kfree(dst_list);
+				src_list = rcu_dereference(src->fence);
+				goto retry;
+			}
+
+			if (dma_fence_is_signaled(fence)) {
+				dma_fence_put(fence);
+				continue;
+			}
+
+			dst_list->shared[dst_list->shared_count++] = fence;
+		}
 	} else {
 		dst_list = NULL;
 	}
 
+	new = dma_fence_get_rcu_safe(&src->fence_excl);
+	rcu_read_unlock();
+
 	kfree(dst->staged);
 	dst->staged = NULL;
 
 	src_list = reservation_object_get_list(dst);
-
 	old = reservation_object_get_excl(dst);
-	new = reservation_object_get_excl(src);
-
-	dma_fence_get(new);
 
 	preempt_disable();
 	write_seqcount_begin(&dst->seq);
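The new loop is an instance of a common RCU copy pattern. A single-threaded sketch of its shape (not kernel code; the names and types are made up): sample the size under the read lock, drop the lock to allocate since kmalloc(GFP_KERNEL) may sleep, then re-validate and retry if the source grew in the meantime.

#include <stdlib.h>
#include <string.h>

struct list { unsigned int count; int items[]; };

static struct list *snapshot(struct list *volatile *src_p)
{
	struct list *dst, *src;
	unsigned int count;

retry:
	src = *src_p;			/* rcu_dereference() in the kernel */
	if (!src)
		return NULL;
	count = src->count;

	/* the kernel drops the RCU read lock here, so the source
	 * may be replaced while we allocate */
	dst = malloc(sizeof(*dst) + count * sizeof(int));
	if (!dst)
		return NULL;

	src = *src_p;			/* re-read after allocating */
	if (!src || src->count > count) {
		free(dst);		/* source grew: buffer too small */
		goto retry;
	}

	dst->count = src->count;
	memcpy(dst->items, src->items, src->count * sizeof(int));
	return dst;
}

int main(void)
{
	static struct list src = { 0 };
	struct list *volatile src_p = &src;

	free(snapshot(&src_p));
	return 0;
}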

View file

@@ -1536,18 +1536,14 @@ struct amdgpu_device {
 	/* sdma */
 	struct amdgpu_sdma		sdma;
 
-	union {
-		struct {
-			/* uvd */
-			struct amdgpu_uvd		uvd;
+	/* uvd */
+	struct amdgpu_uvd		uvd;
 
-			/* vce */
-			struct amdgpu_vce		vce;
-		};
+	/* vce */
+	struct amdgpu_vce		vce;
 
-		/* vcn */
-		struct amdgpu_vcn		vcn;
-	};
+	/* vcn */
+	struct amdgpu_vcn		vcn;
 
 	/* firmwares */
 	struct amdgpu_firmware		firmware;

View file

@@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
 	return true;
 }
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
  */
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
View file

@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
 		return false;
 	}
 
-	tmp = bios[0x18] | (bios[0x19] << 8);
-	if (bios[tmp + 0x14] != 0x0) {
-		DRM_INFO("Not an x86 BIOS ROM\n");
-		return false;
-	}
-
 	bios_header_start = bios[0x48] | (bios[0x49] << 8);
 	if (!bios_header_start) {
 		DRM_INFO("Can't locate bios header\n");

View file

@@ -391,6 +391,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, !kernel, NULL,
 				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
+	if (unlikely(r != 0))
+		return r;
+
 	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 		      initial_bytes_moved;
 	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@@ -400,9 +403,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	else
 		amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
 
-	if (unlikely(r != 0))
-		return r;
-
 	if (kernel)
 		bo->tbo.priority = 1;

View file

@@ -647,7 +647,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	uint32_t allocated = 0;
 	uint32_t tmp, handle = 0;
 	uint32_t *size = &tmp;
-	int i, r, idx = 0;
+	int i, r = 0, idx = 0;
 
 	p->job->vm = NULL;
 	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

View file

@@ -1201,7 +1201,7 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm)
 {
-	int r;
+	int r = 0;
 
 	r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
 	if (r)
@@ -2586,7 +2586,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
-	int i;
+	struct amdgpu_bo *root;
+	int i, r;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -2609,7 +2610,15 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
 	}
 
-	amdgpu_vm_free_levels(&vm->root);
+	root = amdgpu_bo_ref(vm->root.bo);
+	r = amdgpu_bo_reserve(root, true);
+	if (r) {
+		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+	} else {
+		amdgpu_vm_free_levels(&vm->root);
+		amdgpu_bo_unreserve(root);
+	}
+	amdgpu_bo_unref(&root);
 	dma_fence_put(vm->last_dir_update);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
 		amdgpu_vm_free_reserved_vmid(adev, vm, i);

View file

@@ -395,7 +395,16 @@ static int gmc_v9_0_early_init(void *handle)
 static int gmc_v9_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+	/*
+	 * The latest engine allocation on gfx9 is:
+	 * Engine 0, 1: idle
+	 * Engine 2, 3: firmware
+	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
+	 * Engine 14~15: idle
+	 * Engine 16: kfd tlb invalidation
+	 * Engine 17: Gart flushes
+	 */
+	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
 	unsigned i;
 
 	for(i = 0; i < adev->num_rings; ++i) {
@@ -408,9 +417,9 @@ static int gmc_v9_0_late_init(void *handle)
 			 ring->funcs->vmhub);
 	}
 
-	/* Engine 17 is used for GART flushes */
+	/* Engine 16 is used for KFD and 17 for GART flushes */
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
-		BUG_ON(vm_inv_eng[i] > 17);
+		BUG_ON(vm_inv_eng[i] > 16);
 
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }

View file

@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
 }
 
 static u32 soc15_get_xclk(struct amdgpu_device *adev)
 {
-	if (adev->asic_type == CHIP_VEGA10)
-		return adev->clock.spll.reference_freq/4;
-	else
-		return adev->clock.spll.reference_freq;
+	return adev->clock.spll.reference_freq;
 }

View file

@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
 
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+	adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
 	adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
 }

View file

@@ -850,9 +850,9 @@ static int init_over_drive_limits(
 		const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
 {
 	hwmgr->platform_descriptor.overdriveLimit.engineClock =
-		le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+		le32_to_cpu(powerplay_table->ulMaxODEngineClock);
 	hwmgr->platform_descriptor.overdriveLimit.memoryClock =
-		le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+		le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
 
 	hwmgr->platform_descriptor.minOverdriveVDDC = 0;
 	hwmgr->platform_descriptor.maxOverdriveVDDC = 0;

View file

@@ -4809,7 +4809,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
 				   const struct drm_display_mode *mode,
 				   enum hdmi_quantization_range rgb_quant_range,
-				   bool rgb_quant_range_selectable)
+				   bool rgb_quant_range_selectable,
+				   bool is_hdmi2_sink)
 {
 	/*
 	 * CEA-861:
@@ -4833,8 +4834,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
 	 * YQ-field to match the RGB Quantization Range being transmitted
 	 * (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
 	 * set YQ=1) and the Sink shall ignore the YQ-field."
+	 *
+	 * Unfortunately, certain sinks (eg. VIZ Model 67/E261VA) get confused
+	 * by non-zero YQ when receiving RGB. There doesn't seem to be any
+	 * good way to tell which version of CEA-861 the sink supports, so
+	 * we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
+	 * on CEA-861-F.
 	 */
-	if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+	if (!is_hdmi2_sink ||
+	    rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
 		frame->ycc_quantization_range =
 			HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
 	else

View file

@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
 	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
 		DRM_INFO("Cannot find any crtc or sizes\n");
+
+		/* First time: disable all crtc's.. */
+		if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+			restore_fbdev_mode(fb_helper);
 		return -EAGAIN;
 	}

View file

@@ -311,8 +311,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
 	u32 vblank;
 	unsigned long flags;
 
-	WARN(!dev->driver->get_vblank_timestamp,
-	     "This function requires support for accurate vblank timestamps.");
+	WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+		  "This function requires support for accurate vblank timestamps.");
 
 	spin_lock_irqsave(&dev->vblank_time_lock, flags);
 
@@ -869,7 +869,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
 	assert_spin_locked(&dev->event_lock);
 
 	e->pipe = pipe;
-	e->event.sequence = drm_vblank_count(dev, pipe);
+	e->event.sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
 	e->event.crtc_id = crtc->base.id;
 	list_add_tail(&e->base.link, &dev->vblank_event_list);
 }

View file

@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
 {
 	struct drm_encoder *encoder = &fsl_dev->encoder;
 	struct drm_connector *connector = &fsl_dev->connector.base;
-	struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
 	int ret;
 
 	fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
 	if (ret < 0)
 		goto err_sysfs;
 
-	drm_object_property_set_value(&connector->base,
-				      mode_config->dpms_property,
-				      DRM_MODE_DPMS_OFF);
-
 	ret = drm_panel_attach(panel, connector);
 	if (ret) {
 		dev_err(fsl_dev->dev, "failed to attach panel\n");

View file

@@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
 {
 	struct ade_crtc *acrtc = to_ade_crtc(crtc);
 	struct ade_hw_ctx *ctx = acrtc->ctx;
+	struct drm_display_mode *mode = &crtc->state->mode;
+	struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
 
 	if (!ctx->power_on)
 		(void)ade_power_up(ctx);
+	ade_ldi_set_mode(acrtc, mode, adj_mode);
 }
 
 static void ade_crtc_atomic_flush(struct drm_crtc *crtc,

View file

@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
 
 #define GTT_HAW 46
 
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
 
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {

@@ -2598,6 +2598,8 @@ static int intel_runtime_resume(struct device *kdev)
 		ret = vlv_resume_prepare(dev_priv, true);
 	}
 
+	intel_uncore_runtime_resume(dev_priv);
+
 	/*
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).

View file

@@ -694,10 +694,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 
 	/* Due to peculiar init order wrt to hpd handling this is separate. */
 	if (drm_fb_helper_initial_config(&ifbdev->helper,
-					 ifbdev->preferred_bpp)) {
+					 ifbdev->preferred_bpp))
 		intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
-		intel_fbdev_fini(to_i915(ifbdev->helper.dev));
-	}
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -797,7 +795,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
-	if (ifbdev)
+	if (!ifbdev)
+		return;
+
+	intel_fbdev_sync(ifbdev);
+	if (ifbdev->vma)
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }

View file

@@ -481,7 +481,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 					   crtc_state->limited_color_range ?
 					   HDMI_QUANTIZATION_RANGE_LIMITED :
 					   HDMI_QUANTIZATION_RANGE_FULL,
-					   intel_hdmi->rgb_quant_range_selectable);
+					   intel_hdmi->rgb_quant_range_selectable,
+					   is_hdmi2_sink);
 
 	/* TODO: handle pixel repetition for YCBCR420 outputs */
 	intel_write_infoframe(encoder, crtc_state, &frame);

View file

@@ -438,7 +438,9 @@ static bool
 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
 {
 	return (i + 1 < num &&
-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		msgs[i].addr == msgs[i + 1].addr &&
+		!(msgs[i].flags & I2C_M_RD) &&
+		(msgs[i].len == 1 || msgs[i].len == 2) &&
 		(msgs[i + 1].flags & I2C_M_RD));
 }

View file

@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 	i915_check_and_clear_faults(dev_priv);
 }
 
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+	iosf_mbi_register_pmic_bus_access_notifier(
+		&dev_priv->uncore.pmic_bus_access_nb);
+}
+
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
 	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
@@ -1171,8 +1177,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 		 * bus, which will be busy after this notification, leading to:
 		 * "render: timed out waiting for forcewake ack request."
 		 * errors.
+		 *
+		 * The notifier is unregistered during intel_runtime_suspend(),
+		 * so it's ok to access the HW here without holding a RPM
+		 * wake reference -> disable wakeref asserts for the time of
+		 * the access.
 		 */
+		disable_rpm_wakeref_asserts(dev_priv);
 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+		enable_rpm_wakeref_asserts(dev_priv);
 		break;
 	case MBI_PMIC_BUS_ACCESS_END:
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

View file

@@ -121,6 +121,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
 void intel_uncore_suspend(struct drm_i915_private *dev_priv);
 void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);

View file

@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
 }
 
 static const struct soc_device_attribute dpi_soc_devices[] = {
-	{ .family = "OMAP3[456]*" },
-	{ .family = "[AD]M37*" },
+	{ .machine = "OMAP3[456]*" },
+	{ .machine = "[AD]M37*" },
 	{ /* sentinel */ }
 };

View file

@@ -889,25 +889,36 @@ struct hdmi4_features {
 	bool audio_use_mclk;
 };
 
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
 	.cts_swmode = false,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = false,
 };
 
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
 	.cts_swmode = true,
 	.audio_use_mclk = true,
 };
 
 static const struct soc_device_attribute hdmi4_soc_devices[] = {
-	{ .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
-	{ .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
-	{ .family = "OMAP4", .data = &hdmi4_es3_features },
+	{
+		.machine	= "OMAP4430",
+		.revision	= "ES1.?",
+		.data		= &hdmi4430_es1_features,
+	},
+	{
+		.machine	= "OMAP4430",
+		.revision	= "ES2.?",
+		.data		= &hdmi4430_es2_features,
+	},
+	{
+		.family		= "OMAP4",
+		.data		= &hdmi4_features,
+	},
 	{ /* sentinel */ }
 };

View file

@@ -369,6 +369,7 @@ static int panel_simple_remove(struct device *dev)
 	drm_panel_remove(&panel->base);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 
 	if (panel->ddc)
 		put_device(&panel->ddc->dev);
@@ -384,6 +385,7 @@ static void panel_simple_shutdown(struct device *dev)
 	struct panel_simple *panel = dev_get_drvdata(dev);
 
 	panel_simple_disable(&panel->base);
+	panel_simple_unprepare(&panel->base);
 }
 
 static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {

View file

@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
 
 /***** radeon AUX functions *****/
 
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * byes in the src buffer.
  */
 void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
 {
 #ifdef __BIG_ENDIAN
-	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
-	u32 *dst32, *src32;
+	u32 src_tmp[5], dst_tmp[5];
 	int i;
+	u8 align_num_bytes = ALIGN(num_bytes, 4);
 
-	memcpy(src_tmp, src, num_bytes);
-	src32 = (u32 *)src_tmp;
-	dst32 = (u32 *)dst_tmp;
 	if (to_le) {
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = cpu_to_le32(src32[i]);
-		memcpy(dst, dst_tmp, num_bytes);
+		memcpy(src_tmp, src, num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+		memcpy(dst, dst_tmp, align_num_bytes);
 	} else {
-		u8 dws = num_bytes & ~3;
-		for (i = 0; i < ((num_bytes + 3) / 4); i++)
-			dst32[i] = le32_to_cpu(src32[i]);
-		memcpy(dst, dst_tmp, dws);
-		if (num_bytes % 4) {
-			for (i = 0; i < (num_bytes % 4); i++)
-				dst[dws+i] = dst_tmp[dws+i];
-		}
+		memcpy(src_tmp, src, align_num_bytes);
+		for (i = 0; i < align_num_bytes / 4; i++)
+			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+		memcpy(dst, dst_tmp, num_bytes);
 	}
 #else
 	memcpy(dst, src, num_bytes);
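
The rewrite above trades the byte-granular tail handling for whole-word copies: the loop always swaps ALIGN(num_bytes, 4) bytes' worth of 32-bit words, and the final memcpy decides how much of that reaches the caller. A standalone userspace sketch of the same idea (names are ours; __builtin_bswap32 stands in for the cpu_to_le32()/le32_to_cpu() work a big-endian kernel would do):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(n) (((n) + 3u) & ~3u)

/* Swap num_bytes from src into dst one 32-bit word at a time.
 * Mirrors the fixed kernel helper: when sending (to_le), dst must have
 * room for ALIGN4(num_bytes); when receiving, src must provide
 * ALIGN4(num_bytes) and only num_bytes land in dst. */
static void copy_swap(uint8_t *dst, const uint8_t *src,
		      unsigned int num_bytes, int to_le)
{
	uint32_t src_tmp[5] = { 0 }, dst_tmp[5] = { 0 };
	unsigned int align = ALIGN4(num_bytes), i;

	if (to_le) {
		memcpy(src_tmp, src, num_bytes);
		for (i = 0; i < align / 4; i++)
			dst_tmp[i] = __builtin_bswap32(src_tmp[i]);
		memcpy(dst, dst_tmp, align);
	} else {
		memcpy(src_tmp, src, align);
		for (i = 0; i < align / 4; i++)
			dst_tmp[i] = __builtin_bswap32(src_tmp[i]);
		memcpy(dst, dst_tmp, num_bytes);
	}
}

int main(void)
{
	uint8_t in[8] = { 1, 2, 3, 4, 5, 6, 0, 0 };
	uint8_t out[8] = { 0 };
	int i;

	copy_swap(out, in, 6, 1); /* 6 bytes in -> 8 swapped bytes out */
	for (i = 0; i < 8; i++)
		printf("%02x ", out[i]);
	printf("\n"); /* 04 03 02 01 00 00 06 05 */
	return 0;
}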

View file

@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	}
 
 	info->par = rfbdev;
-	info->skip_vt_switch = true;
 
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {

View file

@@ -24,6 +24,7 @@
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/of_graph.h>
+#include <linux/math64.h>
 
 #include "tilcdc_drv.h"
 #include "tilcdc_regs.h"
@@ -48,6 +49,7 @@ struct tilcdc_crtc {
 	unsigned int lcd_fck_rate;
 
 	ktime_t last_vblank;
+	unsigned int hvtotal_us;
 
 	struct drm_framebuffer *curr_fb;
 	struct drm_framebuffer *next_fb;
@@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
 				LCDC_V2_CORE_CLK_EN);
 }
 
+uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
+{
+	return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
+			      mode->clock);
+}
+
 static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
 {
 	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
@@ -459,6 +467,9 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
 	drm_framebuffer_reference(fb);
 
 	crtc->hwmode = crtc->state->adjusted_mode;
+
+	tilcdc_crtc->hvtotal_us =
+		tilcdc_mode_hvtotal(&crtc->hwmode);
 }
 
 static void tilcdc_crtc_enable(struct drm_crtc *crtc)
@@ -648,7 +659,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
 	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
 
 	next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
-				   1000000 / crtc->hwmode.vrefresh);
+				   tilcdc_crtc->hvtotal_us);
 
 	tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
 
 	if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
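
The new helper replaces the old 1000000 / vrefresh estimate, which rounds the refresh rate to a whole hertz, with an exact frame period: the mode clock is in kHz, so 1000 * htotal * vtotal / clock yields microseconds per frame. A quick standalone check of the arithmetic (the mode numbers are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Frame period in microseconds: pixels per frame over pixel clock.
 * clock_khz is the pixel clock in kHz, as in struct drm_display_mode,
 * so the 1000 multiplier converts the quotient to microseconds. */
static unsigned int mode_hvtotal_us(unsigned int htotal, unsigned int vtotal,
				    unsigned int clock_khz)
{
	return (unsigned int)((1000ULL * htotal * vtotal) / clock_khz);
}

int main(void)
{
	/* 1080p60 CEA timing: htotal 2200, vtotal 1125, clock 148500 kHz */
	printf("%u us per frame\n", mode_hvtotal_us(2200, 1125, 148500));
	/* prints 16666; for fractional-rate modes the totals-based value
	 * stays exact where an integer vrefresh would drift */
	return 0;
}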

View file

@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_tt_destroy(bo->ttm);
 	atomic_dec(&bo->glob->bo_count);
 	dma_fence_put(bo->moving);
-	if (bo->resv == &bo->ttm_resv)
-		reservation_object_fini(&bo->ttm_resv);
+	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
 	if (bo->resv == &bo->ttm_resv)
 		return 0;
 
-	reservation_object_init(&bo->ttm_resv);
 	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
 
 	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
-	if (r) {
+	if (r)
 		reservation_object_unlock(&bo->ttm_resv);
-		reservation_object_fini(&bo->ttm_resv);
-	}
 
 	return r;
 }
@@ -440,28 +436,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
+	ret = ttm_bo_individualize_resv(bo);
+	if (ret) {
+		/* Last resort, if we fail to allocate memory for the
+		 * fences block for the BO to become idle
+		 */
+		reservation_object_wait_timeout_rcu(bo->resv, true, false,
+						    30 * HZ);
+		spin_lock(&glob->lru_lock);
+		goto error;
+	}
+
 	spin_lock(&glob->lru_lock);
 	ret = __ttm_bo_reserve(bo, false, true, NULL);
 	if (!ret) {
-		if (!ttm_bo_wait(bo, false, true)) {
+		if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
 			ttm_bo_del_from_lru(bo);
 			spin_unlock(&glob->lru_lock);
-			ttm_bo_cleanup_memtype_use(bo);
+			if (bo->resv != &bo->ttm_resv)
+				reservation_object_unlock(&bo->ttm_resv);
 
-			return;
-		}
-
-		ret = ttm_bo_individualize_resv(bo);
-		if (ret) {
-			/* Last resort, if we fail to allocate memory for the
-			 * fences block for the BO to become idle and free it.
-			 */
-			spin_unlock(&glob->lru_lock);
-			ttm_bo_wait(bo, true, true);
 			ttm_bo_cleanup_memtype_use(bo);
 			return;
 		}
 
 		ttm_bo_flush_all_fences(bo);
 
 		/*
@@ -474,11 +472,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_add_to_lru(bo);
 		}
 
+		if (bo->resv != &bo->ttm_resv)
+			reservation_object_unlock(&bo->ttm_resv);
 		__ttm_bo_unreserve(bo);
 	}
-	if (bo->resv != &bo->ttm_resv)
-		reservation_object_unlock(&bo->ttm_resv);
 
+error:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
@@ -1203,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		lockdep_assert_held(&bo->resv->lock.base);
 	} else {
 		bo->resv = &bo->ttm_resv;
-		reservation_object_init(&bo->ttm_resv);
 	}
+	reservation_object_init(&bo->ttm_resv);
 	atomic_inc(&bo->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;

View file

@@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	mutex_init(&fbo->wu_mutex);
 	fbo->moving = NULL;
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);

View file

@@ -433,7 +433,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
 					   vc4_encoder->limited_rgb_range ?
 					   HDMI_QUANTIZATION_RANGE_LIMITED :
 					   HDMI_QUANTIZATION_RANGE_FULL,
-					   vc4_encoder->rgb_range_selectable);
+					   vc4_encoder->rgb_range_selectable,
+					   false);
 
 	vc4_hdmi_write_infoframe(encoder, &frame);
 }

View file

@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -45,6 +46,7 @@ static const unsigned short normal_i2c[] = {
 #define JC42_REG_TEMP		0x05
 #define JC42_REG_MANID		0x06
 #define JC42_REG_DEVICEID	0x07
+#define JC42_REG_SMBUS		0x22 /* NXP and Atmel, possibly others? */
 
 /* Status bits in temperature register */
 #define JC42_ALARM_CRIT_BIT	15
@@ -75,6 +77,9 @@ static const unsigned short normal_i2c[] = {
 #define GT_MANID		0x1c68  /* Giantec */
 #define GT_MANID2		0x132d  /* Giantec, 2nd mfg ID */
 
+/* SMBUS register */
+#define SMBUS_STMOUT		BIT(7)  /* SMBus time-out, active low */
+
 /* Supported chips */
 
 /* Analog Devices */
@@ -495,6 +500,22 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	data->extended = !!(cap & JC42_CAP_RANGE);
 
+	if (device_property_read_bool(dev, "smbus-timeout-disable")) {
+		int smbus;
+
+		/*
+		 * Not all chips support this register, but from a
+		 * quick read of various datasheets no chip appears
+		 * incompatible with the below attempt to disable
+		 * the timeout. And the whole thing is opt-in...
+		 */
+		smbus = i2c_smbus_read_word_swapped(client, JC42_REG_SMBUS);
+		if (smbus < 0)
+			return smbus;
+		i2c_smbus_write_word_swapped(client, JC42_REG_SMBUS,
+					     smbus | SMBUS_STMOUT);
+	}
+
 	config = i2c_smbus_read_word_swapped(client, JC42_REG_CONFIG);
 	if (config < 0)
 		return config;
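
The new probe block, which implements the smbus-timeout-disable property documented earlier in this commit, is a plain opt-in read-modify-write: read the vendor register, set the active-low STMOUT bit, write it back, and propagate a negative read result as an error. A minimal sketch of that flow against a fake register (no real SMBus traffic; only the bit value comes from the patch, everything else is ours):

#include <stdint.h>
#include <stdio.h>

#define SMBUS_STMOUT (1u << 7) /* bit value from the patch */

static uint16_t fake_reg = 0x0010; /* stand-in for JC42_REG_SMBUS content */

static int reg_read(void) { return fake_reg; }
static void reg_write(uint16_t v) { fake_reg = v; }

int main(void)
{
	int smbus = reg_read();

	if (smbus < 0)
		return 1; /* propagate a failed read, as the probe code does */
	reg_write((uint16_t)(smbus | SMBUS_STMOUT));
	printf("register now 0x%04x\n", fake_reg); /* 0x0090 */
	return 0;
}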

View file

@@ -1617,6 +1617,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	/* Default timeout in interrupt mode: 200 ms */
 	priv->adapter.timeout = HZ / 5;
 
+	if (dev->irq == IRQ_NOTCONNECTED)
+		priv->features &= ~FEATURE_IRQ;
+
 	if (priv->features & FEATURE_IRQ) {
 		u16 pcictl, pcists;

View file

@@ -191,7 +191,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	sg_list_start = umem->sg_head.sgl;
 
 	while (npages) {
-		ret = get_user_pages(cur_base,
+		ret = get_user_pages_longterm(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
 				     gup_flags, page_list, vma_list);

View file

@@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
 	packet->mad.hdr.status = 0;
 	packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
 	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
-	packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
+	/*
+	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
+	 * information is obtained elsewhere. Mask off the upper 16 bits.
+	 */
+	if (agent->device->port_immutable[agent->port_num].core_cap_flags &
+	    RDMA_CORE_PORT_INTEL_OPA)
+		packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
+						  mad_recv_wc->wc->slid);
+	else
+		packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
 	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
 	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
 	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;

View file

@@ -4293,7 +4293,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
 			       const struct ib_wc *in_wc)
 {
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-	u16 slid = ib_lid_cpu16(in_wc->slid);
 	u16 pkey;
 
 	if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
@@ -4320,7 +4319,11 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
 	 */
 	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
 		return 0;
-	ingress_pkey_table_fail(ppd, pkey, slid);
+	/*
+	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
+	 * information is obtained elsewhere. Mask off the upper 16 bits.
+	 */
+	ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
 	return 1;
 }

View file

@@ -480,7 +480,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 		if (b == -1)
 			goto err;
 
-		k->ptr[i] = PTR(ca->buckets[b].gen,
+		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
 				bucket_to_sector(c, b),
 				ca->sb.nr_this_dev);

View file

@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 		return false;
 
 	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
 			return false;

View file

@@ -507,7 +507,7 @@ static void journal_reclaim(struct cache_set *c)
 			continue;
 
 		ja->cur_idx = next;
-		k->ptr[n++] = PTR(0,
+		k->ptr[n++] = MAKE_PTR(0,
 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 				  ca->sb.nr_this_dev);
 	}

View file

@@ -699,7 +699,14 @@ static void cached_dev_read_error(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
 
-	if (s->recoverable) {
+	/*
+	 * If read request hit dirty data (s->read_dirty_data is true),
+	 * then recovery a failed read request from cached device may
+	 * get a stale data back. So read failure recovery is only
+	 * permitted when read request hit clean data in cache device,
+	 * or when cache read race happened.
+	 */
+	if (s->recoverable && !s->read_dirty_data) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);

View file

@@ -1816,6 +1816,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
 
 	BUG_ON(file && mddev->bitmap_info.offset);
 
+	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
+			  mdname(mddev));
+		return ERR_PTR(-EBUSY);
+	}
+
 	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
 	if (!bitmap)
 		return ERR_PTR(-ENOMEM);

View file

@@ -6362,7 +6362,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 				break;
 			}
 		}
-		if (has_journal) {
+		if (has_journal || mddev->bitmap) {
 			export_rdev(rdev);
 			return -EBUSY;
 		}

View file

@@ -7156,6 +7156,13 @@ static int raid5_run(struct mddev *mddev)
 			min_offset_diff = diff;
 	}
 
+	if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
+	    (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
+		pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
+			  mdname(mddev));
+		return -EINVAL;
+	}
+
 	if (mddev->reshape_position != MaxSector) {
 		/* Check that we can continue the reshape.
 		 * Difficulties arise if the stripe we would write to

View file

@@ -185,12 +185,13 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
-	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+	err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
 			     flags, dma->pages, NULL);
 
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
-		dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+		dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+			dma->nr_pages);
 		return err < 0 ? err : -EINVAL;
 	}
 	return 0;

View file

@@ -2043,6 +2043,9 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
 	/* There should only be one entry, but go through the list
 	 * anyway
 	 */
+	if (afu->phb == NULL)
+		return result;
+
 	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 		if (!afu_dev->driver)
 			continue;
@@ -2084,8 +2087,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 		 * Tell the AFU drivers; but we don't care what they
 		 * say, we're going away.
 		 */
-		if (afu->phb != NULL)
-			cxl_vphb_error_detected(afu, state);
+		cxl_vphb_error_detected(afu, state);
 	}
 	return PCI_ERS_RESULT_DISCONNECT;
 }
@@ -2225,6 +2227,9 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 		if (cxl_afu_select_best_mode(afu))
 			goto err;
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			/* Reset the device context.
 			 * TODO: make this less disruptive
@@ -2287,6 +2292,9 @@ static void cxl_pci_resume(struct pci_dev *pdev)
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
+		if (afu->phb == NULL)
+			continue;
+
 		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
 			if (afu_dev->driver && afu_dev->driver->err_handler &&
 			    afu_dev->driver->err_handler->resume)

View file

@@ -365,7 +365,8 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
 	memset(msg, 0, sizeof(msg));
 	msg[0].addr = client->addr;
 	msg[0].buf = addrbuf;
-	addrbuf[0] = 0x90 + offset;
+	/* EUI-48 starts from 0x9a, EUI-64 from 0x98 */
+	addrbuf[0] = 0xa0 - at24->chip.byte_len + offset;
 	msg[0].len = 1;
 	msg[1].addr = client->addr;
 	msg[1].flags = I2C_M_RD;
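
The corrected addressing anchors the serial-number/MAC area at the end of the 0xa0 window, so the first read address follows from the pad length rather than a hardcoded base. A two-line check of the arithmetic described in the new comment:

#include <stdio.h>

int main(void)
{
	unsigned int top = 0xa0; /* end of the serial/MAC window */

	printf("EUI-48 (6 bytes) starts at 0x%02x\n", top - 6); /* 0x9a */
	printf("EUI-64 (8 bytes) starts at 0x%02x\n", top - 8); /* 0x98 */
	/* the old hardcoded 0x90 base matched neither layout */
	return 0;
}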
@@ -506,6 +507,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return count;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Read data from chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -538,6 +542,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 	if (unlikely(!count))
 		return -EINVAL;
 
+	if (off + count > at24->chip.byte_len)
+		return -EINVAL;
+
 	/*
 	 * Write data to chip, protecting against concurrent updates
 	 * from this host, but not from other I2C masters.
@@ -631,6 +638,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		dev_warn(&client->dev,
 			"page_size looks suspicious (no power of 2)!\n");
 
+	/*
+	 * REVISIT: the size of the EUI-48 byte array is 6 in at24mac402, while
+	 * the call to ilog2() in AT24_DEVICE_MAGIC() rounds it down to 4.
+	 *
+	 * Eventually we'll get rid of the magic values altoghether in favor of
+	 * real structs, but for now just manually set the right size.
+	 */
+	if (chip.flags & AT24_FLAG_MAC && chip.byte_len == 4)
+		chip.byte_len = 6;
+
 	/* Use I2C operations unless we're stuck with SMBus extensions. */
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
 		if (chip.flags & AT24_FLAG_ADDR16)

View file

@@ -119,6 +119,10 @@ struct mmc_blk_data {
 	struct device_attribute force_ro;
 	struct device_attribute power_ro_lock;
 	int	area_type;
+
+	/* debugfs files (only in main mmc_blk_data) */
+	struct dentry *status_dentry;
+	struct dentry *ext_csd_dentry;
 };
 
 static DEFINE_MUTEX(open_lock);
@@ -204,9 +208,14 @@ static ssize_t power_ro_lock_store(struct device *dev,
 
 	/* Dispatch locking to the block layer */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		count = PTR_ERR(req);
+		goto out_put;
+	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
+	blk_put_request(req);
 
 	if (!ret) {
 		pr_info("%s: Locking boot partition ro until next power on\n",
@@ -219,7 +228,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
 				set_disk_ro(part_md->disk, 1);
 			}
 	}
-
+out_put:
 	mmc_blk_put(md);
 	return count;
 }
@@ -580,6 +589,10 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
 	req = blk_get_request(mq->queue,
 		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 		__GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto cmd_done;
+	}
 	idatas[0] = idata;
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
 	req_to_mmc_queue_req(req)->drv_op_data = idatas;
@@ -643,6 +656,10 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
 	req = blk_get_request(mq->queue,
 		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
 		__GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto cmd_err;
+	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
 	req_to_mmc_queue_req(req)->drv_op_data = idata;
 	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
@@ -2314,6 +2331,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
 
 	/* Ask the block layer about the card status */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	ret = req_to_mmc_queue_req(req)->drv_op_result;
@@ -2321,6 +2340,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
 		*val = ret;
 		ret = 0;
 	}
+	blk_put_request(req);
 
 	return ret;
 }
@@ -2347,10 +2367,15 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
 
 	/* Ask the block layer for the EXT CSD */
 	req = blk_get_request(mq->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto out_free;
+	}
 	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
 	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
 	blk_execute_rq(mq->queue, NULL, req, 0);
 	err = req_to_mmc_queue_req(req)->drv_op_result;
+	blk_put_request(req);
 	if (err) {
 		pr_err("FAILED %d\n", err);
 		goto out_free;
@@ -2396,7 +2421,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
 	.llseek		= default_llseek,
 };
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
 	struct dentry *root;
 
@@ -2406,28 +2431,53 @@
 	root = card->debugfs_root;
 
 	if (mmc_card_mmc(card) || mmc_card_sd(card)) {
-		if (!debugfs_create_file("status", S_IRUSR, root, card,
-					 &mmc_dbg_card_status_fops))
+		md->status_dentry =
+			debugfs_create_file("status", S_IRUSR, root, card,
+					    &mmc_dbg_card_status_fops);
+		if (!md->status_dentry)
 			return -EIO;
 	}
 
 	if (mmc_card_mmc(card)) {
-		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
-					 &mmc_dbg_ext_csd_fops))
+		md->ext_csd_dentry =
+			debugfs_create_file("ext_csd", S_IRUSR, root, card,
+					    &mmc_dbg_ext_csd_fops);
+		if (!md->ext_csd_dentry)
 			return -EIO;
 	}
 
 	return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+				   struct mmc_blk_data *md)
+{
+	if (!card->debugfs_root)
+		return;
+
+	if (!IS_ERR_OR_NULL(md->status_dentry)) {
+		debugfs_remove(md->status_dentry);
+		md->status_dentry = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
+		debugfs_remove(md->ext_csd_dentry);
+		md->ext_csd_dentry = NULL;
+	}
+}
+
 #else
 
-static int mmc_blk_add_debugfs(struct mmc_card *card)
+static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
 {
 	return 0;
 }
 
+static void mmc_blk_remove_debugfs(struct mmc_card *card,
+				   struct mmc_blk_data *md)
+{
+}
+
 #endif /* CONFIG_DEBUG_FS */
 
 static int mmc_blk_probe(struct mmc_card *card)
@@ -2467,7 +2517,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 	}
 
 	/* Add two debugfs entries */
-	mmc_blk_add_debugfs(card);
+	mmc_blk_add_debugfs(card, md);
 
 	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
 	pm_runtime_use_autosuspend(&card->dev);
@@ -2493,6 +2543,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 {
 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
+	mmc_blk_remove_debugfs(card, md);
 	mmc_blk_remove_parts(card, md);
 	pm_runtime_get_sync(&card->dev);
 	mmc_claim_host(card->host);
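
All of the blk_get_request() call sites above gain the same guard because the function reports failure as an ERR_PTR()-encoded pointer rather than NULL. A compact userspace re-implementation of that encoding convention (a simplified sketch of the kernel's err.h helpers, not the actual headers):

#include <stdio.h>
#include <errno.h>

/* Errors ride in the top page of the address space, mirroring the
 * kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *get_request(int fail)
{
	static int slot;

	if (fail)
		return ERR_PTR(-EBUSY); /* no request available */
	return &slot;
}

int main(void)
{
	void *req = get_request(1);

	if (IS_ERR(req)) {
		printf("failed: %ld\n", PTR_ERR(req)); /* -16 (EBUSY) */
		return 1;
	}
	return 0;
}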

View file

@@ -157,6 +157,9 @@ static int mmc_bus_suspend(struct device *dev)
 		return ret;
 
 	ret = host->bus_ops->suspend(host);
+	if (ret)
+		pm_generic_resume(dev);
+
 	return ret;
 }

View file

@@ -314,4 +314,5 @@ err:
 void mmc_remove_card_debugfs(struct mmc_card *card)
 {
 	debugfs_remove_recursive(card->debugfs_root);
+	card->debugfs_root = NULL;
 }

View file

@@ -780,7 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
-MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(pre_eol_info, "0x%02x\n", card->ext_csd.pre_eol_info);
 MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
 	card->ext_csd.device_life_time_est_typ_a,
 	card->ext_csd.device_life_time_est_typ_b);
@@ -790,7 +790,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 MMC_DEV_ATTR(cmdq_en, "%d\n", card->ext_csd.cmdq_en);
 
 static ssize_t mmc_fwrev_show(struct device *dev,

View file

@@ -675,7 +675,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
-MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
 
 static ssize_t mmc_dsr_show(struct device *dev,

View file

@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
@@ -3650,17 +3651,6 @@ int sdhci_setup_host(struct sdhci_host *host)
 
 	spin_lock_init(&host->lock);
 
-	/*
-	 * Maximum number of segments. Depends on if the hardware
-	 * can do scatter/gather or not.
-	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_segs = SDHCI_MAX_SEGS;
-	else if (host->flags & SDHCI_USE_SDMA)
-		mmc->max_segs = 1;
-	else /* PIO */
-		mmc->max_segs = SDHCI_MAX_SEGS;
-
 	/*
 	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
 	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
@@ -3668,6 +3658,24 @@ int sdhci_setup_host(struct sdhci_host *host)
 	 */
 	mmc->max_req_size = 524288;
 
+	/*
+	 * Maximum number of segments. Depends on if the hardware
+	 * can do scatter/gather or not.
+	 */
+	if (host->flags & SDHCI_USE_ADMA) {
+		mmc->max_segs = SDHCI_MAX_SEGS;
+	} else if (host->flags & SDHCI_USE_SDMA) {
+		mmc->max_segs = 1;
+		if (swiotlb_max_segment()) {
+			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
+						IO_TLB_SEGSIZE;
+			mmc->max_req_size = min(mmc->max_req_size,
+						max_req_size);
+		}
+	} else {	/* PIO */
+		mmc->max_segs = SDHCI_MAX_SEGS;
+	}
+
 	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
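
With SDMA plus a swiotlb bounce buffer in play, a single request must fit in one swiotlb mapping, so the cap is (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE bytes. With the usual constants of this era (IO_TLB_SHIFT = 11, IO_TLB_SEGSIZE = 128 -- stated here as an assumption about the swiotlb headers) that is 256 KiB, half the 512 KiB SDMA default:

#include <stdio.h>

#define IO_TLB_SHIFT   11  /* 2 KiB swiotlb slabs (assumed, see above) */
#define IO_TLB_SEGSIZE 128 /* max contiguous slabs per mapping */

int main(void)
{
	unsigned int sdma_default = 524288; /* 512 KiB SDMA boundary */
	unsigned int swiotlb_cap = (1u << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
	unsigned int max_req = sdma_default < swiotlb_cap ? sdma_default
							  : swiotlb_cap;

	printf("swiotlb cap: %u KiB, max_req_size: %u KiB\n",
	       swiotlb_cap / 1024, max_req / 1024);
	/* 256 KiB both: requests shrink to fit one swiotlb mapping */
	return 0;
}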

View file

@@ -113,7 +113,8 @@
 #define NVM_SIZE_MULTIPLIER 4096	/*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000	/*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3	/*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ	0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ	0x20000000
 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES 7

View file

@@ -3030,9 +3030,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 		ew32(IOSFPC, reg_val);
 
 		reg_val = er32(TARC(0));
-		/* SPT and KBL Si errata workaround to avoid Tx hang */
-		reg_val &= ~BIT(28);
-		reg_val |= BIT(29);
+		/* SPT and KBL Si errata workaround to avoid Tx hang.
+		 * Dropping the number of outstanding requests from
+		 * 3 to 2 in order to avoid a buffer overrun.
+		 */
+		reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+		reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
 		ew32(TARC(0), reg_val);
 	}
 }
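
The named constants are the old bit arithmetic made explicit: bits 28 and 29 of TARC(0) encode the number of outstanding Tx requests, and the workaround drops from 3 (both bits, 0x30000000) to 2 (bit 29 only, 0x20000000). A quick userspace check that the masked update matches the previous BIT() math:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000u
#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000u

int main(void)
{
	unsigned int reg = 0xdeadbeefu; /* arbitrary register snapshot */

	/* old form: clear bit 28, set bit 29 */
	unsigned int old_way = (reg & ~BIT(28)) | BIT(29);
	/* new form: clear both request bits, then select "2 requests" */
	unsigned int new_way = (reg & ~E1000_TARC0_CB_MULTIQ_3_REQ) |
			       E1000_TARC0_CB_MULTIQ_2_REQ;

	printf("%08x %08x %s\n", old_way, new_way,
	       old_way == new_way ? "equal" : "differ"); /* equal */
	return 0;
}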

View file

@@ -108,7 +108,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,

View file

@@ -2519,6 +2519,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */

View file

@@ -0,0 +1,11 @@
+--- drivers/platform/x86/hp-wmi.c
++++ drivers/platform/x86/hp-wmi.c
+@@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
+ 	if (state < 0)
+ 		return state;
+ 
+-	return state & 0x1;
++	return !!(state & mask);
+ }
+ 
+ static int __init hp_wmi_bios_2008_later(void)
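
The new patch file above carries a one-line fix: a hardcoded bit-0 test becomes a test of the caller's mask, normalized to 0/1 by the double negation. The difference in miniature (the mask value is hypothetical):

#include <stdio.h>

#define HPWMI_FEATURE_MASK (1 << 2) /* hypothetical feature bit */

int main(void)
{
	int state = 0x04; /* feature bit set, bit 0 clear */

	printf("old: %d\n", state & 0x1);                    /* 0 -> wrong */
	printf("new: %d\n", !!(state & HPWMI_FEATURE_MASK)); /* 1 -> right */
	return 0;
}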

View file

@@ -281,8 +281,8 @@ static int autofs4_mount_wait(const struct path *path, bool rcu_walk)
 		pr_debug("waiting for mount name=%pd\n", path->dentry);
 		status = autofs4_wait(sbi, path, NFY_MOUNT);
 		pr_debug("mount wait done status=%d\n", status);
-		ino->last_used = jiffies;
 	}
+	ino->last_used = jiffies;
 	return status;
 }
 
@@ -321,21 +321,16 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path)
 	 */
 	if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
 		struct dentry *parent = dentry->d_parent;
+		struct autofs_info *ino;
 		struct dentry *new;
 
 		new = d_lookup(parent, &dentry->d_name);
 		if (!new)
 			return NULL;
-		if (new == dentry)
-			dput(new);
-		else {
-			struct autofs_info *ino;
-
-			ino = autofs4_dentry_ino(new);
-			ino->last_used = jiffies;
-			dput(path->dentry);
-			path->dentry = new;
-		}
+		ino = autofs4_dentry_ino(new);
+		ino->last_used = jiffies;
+		dput(path->dentry);
+		path->dentry = new;
 	}
 	return path->dentry;
 }

View file

@@ -3526,13 +3526,6 @@ again:
 		goto again;
 	}
 
-	/* We've already setup this transaction, go ahead and exit */
-	if (block_group->cache_generation == trans->transid &&
-	    i_size_read(inode)) {
-		dcs = BTRFS_DC_SETUP;
-		goto out_put;
-	}
-
 	/*
 	 * We want to set the generation to 0, that way if anything goes wrong
 	 * from here on out we know not to trust this cache when we load up next
@@ -3556,6 +3549,13 @@ again:
 	}
 	WARN_ON(ret);
 
+	/* We've already setup this transaction, go ahead and exit */
+	if (block_group->cache_generation == trans->transid &&
+	    i_size_read(inode)) {
+		dcs = BTRFS_DC_SETUP;
+		goto out_put;
+	}
+
 	if (i_size_read(inode) > 0) {
 		ret = btrfs_check_trunc_cache_free_space(fs_info,
 					&fs_info->global_block_rsv);

View file

@@ -1340,10 +1340,15 @@ void setup_new_exec(struct linux_binprm * bprm)
 		 * avoid bad behavior from the prior rlimits. This has to
 		 * happen before arch_pick_mmap_layout(), which examines
 		 * RLIMIT_STACK, but after the point of no return to avoid
-		 * needing to clean up the change on failure.
+		 * races from other threads changing the limits. This also
+		 * must be protected from races with prlimit() calls.
 		 */
+		task_lock(current->group_leader);
 		if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
 			current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
+		if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
+			current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
+		task_unlock(current->group_leader);
 	}
 
 	arch_pick_mmap_layout(current->mm);

View file

@@ -779,7 +779,7 @@ static void __exit fat_destroy_inodecache(void)
 
 static int fat_remount(struct super_block *sb, int *flags, char *data)
 {
-	int new_rdonly;
+	bool new_rdonly;
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	*flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);

View file

@@ -274,6 +274,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
 	if (ln->nlmsvc_users) {
 		if (--ln->nlmsvc_users == 0) {
 			nlm_shutdown_hosts_net(net);
+			cancel_delayed_work_sync(&ln->grace_period_end);
+			locks_end_grace(&ln->lockd_manager);
 			svc_shutdown_net(serv, net);
 			dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
 		}

View file

@@ -1129,18 +1129,9 @@ static int follow_automount(struct path *path, struct nameidata *nd,
 	 * of the daemon to instantiate them before they can be used.
 	 */
 	if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-			   LOOKUP_OPEN | LOOKUP_CREATE |
-			   LOOKUP_AUTOMOUNT))) {
-		/* Positive dentry that isn't meant to trigger an
-		 * automount, EISDIR will allow it to be used,
-		 * otherwise there's no mount here "now" so return
-		 * ENOENT.
-		 */
-		if (path->dentry->d_inode)
-			return -EISDIR;
-		else
-			return -ENOENT;
-	}
+			   LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+	    path->dentry->d_inode)
+		return -EISDIR;
 
 	if (path->dentry->d_sb->s_user_ns != &init_user_ns)
 		return -EACCES;

View file

@@ -3512,7 +3512,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 		/* ignore lock owners */
 		if (local->st_stateowner->so_is_open_owner == 0)
 			continue;
-		if (local->st_stateowner == &oo->oo_owner) {
+		if (local->st_stateowner != &oo->oo_owner)
+			continue;
+		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
 			ret = local;
 			atomic_inc(&ret->st_stid.sc_count);
 			break;
@@ -3521,6 +3523,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
 	return ret;
 }
 
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+	__be32 ret = nfs_ok;
+
+	switch (s->sc_type) {
+	default:
+		break;
+	case NFS4_CLOSED_STID:
+	case NFS4_CLOSED_DELEG_STID:
+		ret = nfserr_bad_stateid;
+		break;
+	case NFS4_REVOKED_DELEG_STID:
+		ret = nfserr_deleg_revoked;
+	}
+	return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+	__be32 ret;
+
+	mutex_lock(&stp->st_mutex);
+	ret = nfsd4_verify_open_stid(&stp->st_stid);
+	if (ret != nfs_ok)
+		mutex_unlock(&stp->st_mutex);
+	return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+	struct nfs4_ol_stateid *stp;
+
+	for (;;) {
+		spin_lock(&fp->fi_lock);
+		stp = nfsd4_find_existing_open(fp, open);
+		spin_unlock(&fp->fi_lock);
+		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+			break;
+		nfs4_put_stid(&stp->st_stid);
+	}
+	return stp;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 			   struct nfsd4_compound_state *cstate)
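
nfsd4_find_and_lock_existing_open() is the classic lock-then-revalidate loop: look the object up under a spinlock, take its mutex, and if the object was closed in the window between the two, drop the reference and start over. A generic userspace sketch of the same pattern using pthreads (all names are ours, not nfsd's):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t mu;
	int closed; /* set by a concurrent "CLOSE" while we were unlocked */
};

/* Take obj->mu, but fail if the object died before we got the lock. */
static int lock_and_verify(struct obj *o)
{
	pthread_mutex_lock(&o->mu);
	if (o->closed) {
		pthread_mutex_unlock(&o->mu);
		return -1;
	}
	return 0;
}

static struct obj *find_and_lock(struct obj *table[], int n)
{
	for (;;) {
		struct obj *o = NULL;
		int i;

		/* a real lookup would run under a table spinlock */
		for (i = 0; i < n; i++)
			if (table[i] && !table[i]->closed) {
				o = table[i];
				break;
			}
		if (!o || lock_and_verify(o) == 0)
			return o; /* locked and still live, or absent */
		/* lost the race: retry the lookup from scratch */
	}
}

int main(void)
{
	struct obj a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct obj *table[1] = { &a };
	struct obj *o = find_and_lock(table, 1);

	if (o) {
		puts("locked a live object");
		pthread_mutex_unlock(&o->mu);
	}
	return 0;
}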
@@ -3565,6 +3613,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 	mutex_init(&stp->st_mutex);
 	mutex_lock(&stp->st_mutex);
 
+retry:
 	spin_lock(&oo->oo_owner.so_client->cl_lock);
 	spin_lock(&fp->fi_lock);
 
@@ -3589,7 +3638,11 @@ out_unlock:
 	spin_unlock(&fp->fi_lock);
 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
 	if (retstp) {
-		mutex_lock(&retstp->st_mutex);
+		/* Handle races with CLOSE */
+		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+			nfs4_put_stid(&retstp->st_stid);
+			goto retry;
+		}
 		/* To keep mutex tracking happy */
 		mutex_unlock(&stp->st_mutex);
 		stp = retstp;
@@ -4399,6 +4452,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 	struct nfs4_ol_stateid *stp = NULL;
 	struct nfs4_delegation *dp = NULL;
 	__be32 status;
+	bool new_stp = false;
 
 	/*
 	 * Lookup file; if found, lookup stateid and check open request,
@@ -4410,9 +4464,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		status = nfs4_check_deleg(cl, open, &dp);
 		if (status)
 			goto out;
-		spin_lock(&fp->fi_lock);
-		stp = nfsd4_find_existing_open(fp, open);
-		spin_unlock(&fp->fi_lock);
+		stp = nfsd4_find_and_lock_existing_open(fp, open);
 	} else {
 		open->op_file = NULL;
 		status = nfserr_bad_stateid;
@@ -4420,35 +4472,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 			goto out;
 	}
 
+	if (!stp) {
+		stp = init_open_stateid(fp, open);
+		if (!open->op_stp)
+			new_stp = true;
+	}
+
 	/*
 	 * OPEN the file, or upgrade an existing OPEN.
 	 * If truncate fails, the OPEN fails.
+	 *
+	 * stp is already locked.
 	 */
-	if (stp) {
+	if (!new_stp) {
 		/* Stateid was found, this is an OPEN upgrade */
-		mutex_lock(&stp->st_mutex);
 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
 		if (status) {
 			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 	} else {
-		/* stp is returned locked. */
-		stp = init_open_stateid(fp, open);
-		/* See if we lost the race to some other thread */
-		if (stp->st_access_bmap != 0) {
-			status = nfs4_upgrade_open(rqstp, fp, current_fh,
-						stp, open);
-			if (status) {
-				mutex_unlock(&stp->st_mutex);
-				goto out;
-			}
-			goto upgrade_out;
-		}
 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
 		if (status) {
-			mutex_unlock(&stp->st_mutex);
+			stp->st_stid.sc_type = NFS4_CLOSED_STID;
 			release_open_stateid(stp);
+			mutex_unlock(&stp->st_mutex);
 			goto out;
 		}
 
@@ -4457,7 +4505,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
 		if (stp->st_clnt_odstate == open->op_odstate)
 			open->op_odstate = NULL;
 	}
-upgrade_out:
+
 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
 	mutex_unlock(&stp->st_mutex);
@@ -4684,7 +4732,7 @@ nfs4_laundromat(struct nfsd_net *nn)
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);
@@ -5317,7 +5365,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
 	bool unhashed;
 	LIST_HEAD(reaplist);
 
-	s->st_stid.sc_type = NFS4_CLOSED_STID;
 	spin_lock(&clp->cl_lock);
 	unhashed = unhash_open_stateid(s, &reaplist);
 
@@ -5357,10 +5404,12 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	nfsd4_bump_seqid(cstate, status);
 	if (status)
 		goto out;
+
+	stp->st_stid.sc_type = NFS4_CLOSED_STID;
 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-	mutex_unlock(&stp->st_mutex);
 
 	nfsd4_close_open_stateid(stp);
+	mutex_unlock(&stp->st_mutex);
 
 	/* put reference from nfs4_preprocess_seqid_op */
 	nfs4_put_stid(&stp->st_stid);
@@ -7103,7 +7152,7 @@ nfs4_state_shutdown_net(struct net *net)
 	spin_unlock(&nn->blocked_locks_lock);
 
 	while (!list_empty(&reaplist)) {
-		nbl = list_first_entry(&nn->blocked_locks_lru,
+		nbl = list_first_entry(&reaplist,
 					struct nfsd4_blocked_lock, nbl_lru);
 		list_del_init(&nbl->nbl_lru);
 		posix_unblock_lock(&nbl->nbl_lock);

View file

@@ -105,6 +105,7 @@ enum acpi_bus_device_type {
 	ACPI_BUS_TYPE_THERMAL,
 	ACPI_BUS_TYPE_POWER_BUTTON,
 	ACPI_BUS_TYPE_SLEEP_BUTTON,
+	ACPI_BUS_TYPE_ECDT_EC,
 	ACPI_BUS_DEVICE_TYPE_COUNT
 };

View file

@@ -58,6 +58,7 @@
 #define ACPI_VIDEO_HID			"LNXVIDEO"
 #define ACPI_BAY_HID			"LNXIOBAY"
 #define ACPI_DOCK_HID			"LNXDOCK"
+#define ACPI_ECDT_HID			"LNXEC"
 
 /* Quirk for broken IBM BIOSes */
 #define ACPI_SMBUS_IBM_HID		"SMBUSIBM"

View file

@@ -814,6 +814,14 @@ static inline int pmd_write(pmd_t pmd)
 #endif /* __HAVE_ARCH_PMD_WRITE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+	BUG();
+	return 0;
+}
+#endif /* pud_write */
+
 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
 	(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
 	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
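
Moving the stub into the generic header (and out of hugetlb.h, removed further below) relies on the usual arch-override idiom: an architecture that really implements pud_write() also defines a macro of the same name, so the generic #ifndef fallback compiles out. A toy illustration of the idiom (names reused for flavor only):

#include <stdio.h>

/* An "arch header" that supplies its own implementation also defines
 * the macro, which is what the generic #ifndef below keys off. */
#define pud_write pud_write
static inline int pud_write(unsigned long pud) { return !!(pud & 2); }

/* "Generic header" fallback: skipped because the macro already exists. */
#ifndef pud_write
static inline int pud_write(unsigned long pud) { return 0; }
#endif

int main(void)
{
	printf("%d\n", pud_write(2)); /* 1: the arch version was picked */
	return 0;
}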

View file

@@ -255,6 +255,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
 			unsigned int ivsize);
 ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
 			int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
 unsigned int af_alg_poll(struct file *file, struct socket *sock,
 			 poll_table *wait);

View file

@@ -360,7 +360,8 @@ void
 drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
 				   const struct drm_display_mode *mode,
 				   enum hdmi_quantization_range rgb_quant_range,
-				   bool rgb_quant_range_selectable);
+				   bool rgb_quant_range_selectable,
+				   bool is_hdmi2_sink);
 
 /**
  * drm_eld_mnl - Get ELD monitor name length in bytes.
View file

@@ -16,3 +16,6 @@
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#define randomized_struct_fields_start	struct {
+#define randomized_struct_fields_end	};

View file

@@ -3069,7 +3069,8 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
 static inline int vfs_fstatat(int dfd, const char __user *filename,
 			      struct kstat *stat, int flags)
 {
-	return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+	return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+			 stat, STATX_BASIC_STATS);
 }
 static inline int vfs_fstat(int fd, struct kstat *stat)
 {
@@ -3175,6 +3176,20 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
 	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
 }
 
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+	struct inode *inode;
+
+	if (!vma->vm_file)
+		return false;
+	if (!vma_is_dax(vma))
+		return false;
+	inode = file_inode(vma->vm_file);
+	if (inode->i_mode == S_IFCHR)
+		return false; /* device-dax */
+	return true;
+}
+
 static inline int iocb_flags(struct file *file)
 {
 	int res = 0;

View file

@@ -239,14 +239,6 @@ static inline int pgd_write(pgd_t pgd)
 }
 #endif
 
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
-	BUG();
-	return 0;
-}
-#endif
-
 #define HUGETLB_ANON_FILE "anon_hugepage"
 
 enum {

View file

@@ -54,7 +54,7 @@ static inline struct page *new_page_nodemask(struct page *page,
 		new_page = __alloc_pages_nodemask(gfp_mask, order,
 				preferred_nid, nodemask);
 
-	if (new_page && PageTransHuge(page))
+	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
 
 	return new_page;

Some files were not shown because too many files have changed in this diff.