updating to mainline 4.14-rc5

Jake Day 2017-10-17 11:12:13 -04:00
parent 2a2d6c5cc8
commit 2f5f0b4f89
245 changed files with 1892 additions and 1118 deletions


@@ -14,13 +14,3 @@ Description:	Enable/disable VMA based swap readahead.
 		still used for tmpfs etc. other users. If set to
 		false, the global swap readahead algorithm will be
 		used for all swappable pages.
-
-What:		/sys/kernel/mm/swap/vma_ra_max_order
-Date:		August 2017
-Contact:	Linux memory management mailing list <linux-mm@kvack.org>
-Description:	The max readahead size in order for VMA based swap readahead
-
-		VMA based swap readahead algorithm will readahead at
-		most 1 << max_order pages for each readahead. The
-		real readahead size for each readahead will be scaled
-		according to the estimation algorithm.


@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
 	and packet type ID), so in a "gatewayed" configuration, all
 	outgoing traffic will generally use the same device. Incoming
 	traffic may also end up on a single device, but that is
-	dependent upon the balancing policy of the peer's 8023.ad
+	dependent upon the balancing policy of the peer's 802.3ad
 	implementation. In a "local" configuration, traffic will be
 	distributed across the devices in the bond.


@@ -5346,9 +5346,7 @@ M:	"J. Bruce Fields" <bfields@fieldses.org>
 L:	linux-fsdevel@vger.kernel.org
 S:	Maintained
 F:	include/linux/fcntl.h
-F:	include/linux/fs.h
 F:	include/uapi/linux/fcntl.h
-F:	include/uapi/linux/fs.h
 F:	fs/fcntl.c
 F:	fs/locks.c
@@ -5357,6 +5355,8 @@ M:	Alexander Viro <viro@zeniv.linux.org.uk>
 L:	linux-fsdevel@vger.kernel.org
 S:	Maintained
 F:	fs/*
+F:	include/linux/fs.h
+F:	include/uapi/linux/fs.h

 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	Riku Voipio <riku.voipio@iki.fi>
@@ -7571,7 +7571,7 @@ F:	arch/mips/include/asm/kvm*
 F:	arch/mips/kvm/

 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M:	Alexander Graf <agraf@suse.com>
+M:	Paul Mackerras <paulus@ozlabs.org>
 L:	kvm-ppc@vger.kernel.org
 W:	http://www.linux-kvm.org/
 T:	git git://github.com/agraf/linux-2.6.git


@ -1,7 +1,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 14 PATCHLEVEL = 14
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc4 EXTRAVERSION = -rc5
NAME = Fearless Coyote NAME = Fearless Coyote
# *DOCUMENTATION* # *DOCUMENTATION*
@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
ifeq ($(has_libelf),1) ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE objtool_target := tools/objtool FORCE
else else
$(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") ifdef CONFIG_ORC_UNWINDER
$(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
else
$(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
endif
SKIP_STACK_VALIDATION := 1 SKIP_STACK_VALIDATION := 1
export SKIP_STACK_VALIDATION export SKIP_STACK_VALIDATION
endif endif


@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
 	  and non-text memory will be made non-executable. This provides
 	  protection against certain security exploits (e.g. writing to text)

-config ARCH_WANT_RELAX_ORDER
-	bool
-
 config ARCH_HAS_REFCOUNT
 	bool
 	help


@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 		return __cmpxchg_small(ptr, old, new, size);

 	case 4:
-		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
+		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
+				     (u32)old, new);

 	case 8:
 		/* lld/scd are only available for MIPS64 */
 		if (!IS_ENABLED(CONFIG_64BIT))
 			return __cmpxchg_called_with_bad_pointer();

-		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
+		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
+				     (u64)old, new);

 	default:
 		return __cmpxchg_called_with_bad_pointer();
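Note: a user-space sketch (not kernel code) of why the new (u32) cast matters. The key assumption is the documented MIPS behavior that "ll" sign-extends the loaded 32-bit word to 64 bits, so a 64-bit "old" with bit 31 set but upper bits clear could never compare equal and the cmpxchg would fail forever.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mem = 0x80000000u;              /* 32-bit value in memory */
    uint64_t loaded = (int64_t)(int32_t)mem; /* what "ll" yields: sign-extended */
    uint64_t old = 0x80000000u;              /* caller's 64-bit "old" argument */

    printf("without cast: %d\n", loaded == old);                      /* 0: never equal */
    printf("with cast:    %d\n", (uint32_t)loaded == (uint32_t)old);  /* 1 */
    return 0;
}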


@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
 }

 static struct plat_stmmacenet_data ls1x_eth0_pdata = {
 	.bus_id			= 0,
 	.phy_addr		= -1,
 #if defined(CONFIG_LOONGSON1_LS1B)
 	.interface		= PHY_INTERFACE_MODE_MII,
 #elif defined(CONFIG_LOONGSON1_LS1C)
 	.interface		= PHY_INTERFACE_MODE_RMII,
 #endif
 	.mdio_bus_data		= &ls1x_mdio_bus_data,
 	.dma_cfg		= &ls1x_eth_dma_cfg,
 	.has_gmac		= 1,
 	.tx_coe			= 1,
+	.rx_queues_to_use	= 1,
+	.tx_queues_to_use	= 1,
 	.init			= ls1x_eth_mux_init,
 };

 static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
 #ifdef CONFIG_LOONGSON1_LS1B
 static struct plat_stmmacenet_data ls1x_eth1_pdata = {
 	.bus_id			= 1,
 	.phy_addr		= -1,
 	.interface		= PHY_INTERFACE_MODE_MII,
 	.mdio_bus_data		= &ls1x_mdio_bus_data,
 	.dma_cfg		= &ls1x_eth_dma_cfg,
 	.has_gmac		= 1,
 	.tx_coe			= 1,
+	.rx_queues_to_use	= 1,
+	.tx_queues_to_use	= 1,
 	.init			= ls1x_eth_mux_init,
 };

 static struct resource ls1x_eth1_resources[] = {


@@ -2558,7 +2558,6 @@ dcopuop:
 				break;
 			default:
 				/* Reserved R6 ops */
-				pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
 				return SIGILL;
 			}
 		}
@@ -2719,7 +2718,6 @@ dcopuop:
 				break;
 			default:
 				/* Reserved R6 ops */
-				pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
 				return SIGILL;
 			}
 		}


@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 {
 	int src, dst, r, td, ts, mem_off, b_off;
 	bool need_swap, did_move, cmp_eq;
-	unsigned int target;
+	unsigned int target = 0;
 	u64 t64;
 	s64 t64s;
 	int bpf_op = BPF_OP(insn->code);


@@ -30,8 +30,6 @@ cfg="$4"
 boards_origin="$5"
 shift 5

-cd "${srctree}"
-
 # Only print Skipping... lines if the user explicitly specified BOARDS=. In the
 # general case it only serves to obscure the useful output about what actually
 # was included.
@@ -48,7 +46,7 @@ environment*)
 esac

 for board in $@; do
-	board_cfg="arch/mips/configs/generic/board-${board}.config"
+	board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
 	if [ ! -f "${board_cfg}" ]; then
 		echo "WARNING: Board config '${board_cfg}' not found"
 		continue
@@ -84,7 +82,7 @@ for board in $@; do
 	done || continue

 	# Merge this board config fragment into our final config file
-	./scripts/kconfig/merge_config.sh \
+	${srctree}/scripts/kconfig/merge_config.sh \
 		-m -O ${objtree} ${cfg} ${board_cfg} \
 		| grep -Ev '^(#|Using)'
 done


@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 TRAMP_KVM(PACA_EXGEN, 0x700)
 EXC_COMMON_BEGIN(program_check_common)
-	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+	/*
+	 * It's possible to receive a TM Bad Thing type program check with
+	 * userspace register values (in particular r1), but with SRR1 reporting
+	 * that we came from the kernel. Normally that would confuse the bad
+	 * stack logic, and we would report a bad kernel stack pointer. Instead
+	 * we switch to the emergency stack if we're taking a TM Bad Thing from
+	 * the kernel.
+	 */
+	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
+	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
+	and	r10,r10,r12		/* Mask SRR1 with that.		*/
+	srdi	r10,r10,8		/* Shift it so we can compare	*/
+	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
+	bne 1f				/* If != go to normal path.	*/
+
+	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
+	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
+					/* 3 in EXCEPTION_PROLOG_COMMON	*/
+	mr	r10,r1			/* Save r1			*/
+	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
+	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
+	b 3f				/* Jump into the macro !!	*/
+1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
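Note: the mask-and-compare sequence above reduces to a single predicate on SRR1. A C sketch of the same test (the constant values are assumptions for illustration; MSR_PR is the problem-state bit, 0x200000 the TM Bad Thing reason bit checked by the assembly):

#define MSR_PR      0x4000UL    /* assumed: problem-state bit */
#define SRR1_PROGTM 0x200000UL  /* TM Bad Thing reason, as in the asm */

/* Use the emergency stack only when the trap came from the kernel
 * (PR = 0) but was a TM Bad Thing (PROGTM = 1). */
static int use_emergency_stack(unsigned long srr1)
{
    return (srr1 & (MSR_PR | SRR1_PROGTM)) == SRR1_PROGTM;
}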


@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	if (MSR_TM_RESV(msr))
 		return -EINVAL;

-	/* pull in MSR TM from user context */
+	/* pull in MSR TS bits from user context */
 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

+	/*
+	 * Ensure that TM is enabled in regs->msr before we leave the signal
+	 * handler. It could be the case that (a) user disabled the TM bit
+	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
+	 * TM bit was disabled because a sufficient number of context switches
+	 * happened whilst in the signal handler and load_tm overflowed,
+	 * disabling the TM bit. In either case we can end up with an illegal
+	 * TM state leading to a TM Bad Thing when we return to userspace.
+	 */
+	regs->msr |= MSR_TM;
+
 	/* pull in MSR LE from user context */
 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);


@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
* - we have no stack frame and can not allocate one * - we have no stack frame and can not allocate one
* - LR points back to the original caller (in A) * - LR points back to the original caller (in A)
* - CTR holds the new NIP in C * - CTR holds the new NIP in C
* - r0 & r12 are free * - r0, r11 & r12 are free
*
* r0 can't be used as the base register for a DS-form load or store, so
* we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
*/ */
livepatch_handler: livepatch_handler:
CURRENT_THREAD_INFO(r12, r1) CURRENT_THREAD_INFO(r12, r1)
/* Save stack pointer into r0 */
mr r0, r1
/* Allocate 3 x 8 bytes */ /* Allocate 3 x 8 bytes */
ld r1, TI_livepatch_sp(r12) ld r11, TI_livepatch_sp(r12)
addi r1, r1, 24 addi r11, r11, 24
std r1, TI_livepatch_sp(r12) std r11, TI_livepatch_sp(r12)
/* Save toc & real LR on livepatch stack */ /* Save toc & real LR on livepatch stack */
std r2, -24(r1) std r2, -24(r11)
mflr r12 mflr r12
std r12, -16(r1) std r12, -16(r11)
/* Store stack end marker */ /* Store stack end marker */
lis r12, STACK_END_MAGIC@h lis r12, STACK_END_MAGIC@h
ori r12, r12, STACK_END_MAGIC@l ori r12, r12, STACK_END_MAGIC@l
std r12, -8(r1) std r12, -8(r11)
/* Restore real stack pointer */
mr r1, r0
/* Put ctr in r12 for global entry and branch there */ /* Put ctr in r12 for global entry and branch there */
mfctr r12 mfctr r12
@ -216,36 +207,30 @@ livepatch_handler:
/* /*
* Now we are returning from the patched function to the original * Now we are returning from the patched function to the original
* caller A. We are free to use r0 and r12, and we can use r2 until we * caller A. We are free to use r11, r12 and we can use r2 until we
* restore it. * restore it.
*/ */
CURRENT_THREAD_INFO(r12, r1) CURRENT_THREAD_INFO(r12, r1)
/* Save stack pointer into r0 */ ld r11, TI_livepatch_sp(r12)
mr r0, r1
ld r1, TI_livepatch_sp(r12)
/* Check stack marker hasn't been trashed */ /* Check stack marker hasn't been trashed */
lis r2, STACK_END_MAGIC@h lis r2, STACK_END_MAGIC@h
ori r2, r2, STACK_END_MAGIC@l ori r2, r2, STACK_END_MAGIC@l
ld r12, -8(r1) ld r12, -8(r11)
1: tdne r12, r2 1: tdne r12, r2
EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
/* Restore LR & toc from livepatch stack */ /* Restore LR & toc from livepatch stack */
ld r12, -16(r1) ld r12, -16(r11)
mtlr r12 mtlr r12
ld r2, -24(r1) ld r2, -24(r11)
/* Pop livepatch stack frame */ /* Pop livepatch stack frame */
CURRENT_THREAD_INFO(r12, r0) CURRENT_THREAD_INFO(r12, r1)
subi r1, r1, 24 subi r11, r11, 24
std r1, TI_livepatch_sp(r12) std r11, TI_livepatch_sp(r12)
/* Restore real stack pointer */
mr r1, r0
/* Return to original caller of live patched function */ /* Return to original caller of live patched function */
blr blr


@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
  * Logical instructions
  */
 		case 26:	/* cntlzw */
-			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
+			val = (unsigned int) regs->gpr[rd];
+			op->val = ( val ? __builtin_clz(val) : 32 );
 			goto logical_done;
 #ifdef __powerpc64__
 		case 58:	/* cntlzd */
-			op->val = __builtin_clzl(regs->gpr[rd]);
+			val = regs->gpr[rd];
+			op->val = ( val ? __builtin_clzl(val) : 64 );
 			goto logical_done;
 #endif
 		case 28:	/* and */
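Note: the guards exist because the GCC builtins __builtin_clz() and __builtin_clzl() are undefined for a zero argument, while the Power cntlzw/cntlzd instructions are architecturally defined to return the operand width. A minimal sketch of the corrected emulation:

/* cntlzw: count leading zeros of a 32-bit value; 32 for input 0. */
static unsigned int emulate_cntlzw(unsigned int x)
{
    return x ? __builtin_clz(x) : 32;
}

/* cntlzd: 64-bit variant; 64 for input 0. */
static unsigned long emulate_cntlzd(unsigned long x)
{
    return x ? __builtin_clzl(x) : 64;
}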


@@ -1438,7 +1438,6 @@ out:

 int arch_update_cpu_topology(void)
 {
-	lockdep_assert_cpus_held();
 	return numa_update_cpu_topology(true);
 }


@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)

 	/* Take the mutex lock for this node and then decrement the reference count */
 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * The scenario where this is true is, when perf session is
+		 * started, followed by offlining of all cpus in a given node.
+		 *
+		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
+		 * function set the ref->count to zero, if the cpu which is
+		 * about to offline is the last cpu in a given node and make
+		 * an OPAL call to disable the engine in that node.
+		 *
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)

 	/* We need only vbase for core counters */
 	mem_info->vbase = page_address(alloc_pages_node(phys_id,
-				     GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				     get_order(size)));
+				     GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				     __GFP_NOWARN, get_order(size)));
 	if (!mem_info->vbase)
 		return -ENOMEM;
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
 		return;

 	mutex_lock(&ref->lock);
+	if (ref->refc == 0) {
+		/*
+		 * The scenario where this is true is, when perf session is
+		 * started, followed by offlining of all cpus in a given core.
+		 *
+		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
+		 * function set the ref->count to zero, if the cpu which is
+		 * about to offline is the last cpu in a given core and make
+		 * an OPAL call to disable the engine in that core.
+		 *
+		 */
+		mutex_unlock(&ref->lock);
+		return;
+	}
 	ref->refc--;
 	if (ref->refc == 0) {
 		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
 	 * free the memory in cpu offline path.
 	 */
 	local_mem = page_address(alloc_pages_node(phys_id,
-				     GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				     get_order(size)));
+				     GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+				     __GFP_NOWARN, get_order(size)));
 	if (!local_mem)
 		return -ENOMEM;
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 	}

 	/* Only free the attr_groups which are dynamically allocated */
-	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
+		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 	kfree(pmu_ptr);
 	return;
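Note: the new early return guards a refcount that the CPU-hotplug path may already have driven to zero; decrementing again would underflow and the "stop the engine" call would never fire again. A kernel-style sketch of the pattern (names are hypothetical, not the driver's own):

#include <linux/mutex.h>

struct engine_ref { struct mutex lock; int refc; };

static void engine_stop(struct engine_ref *ref) { /* the OPAL stop call */ }

static void engine_put(struct engine_ref *ref)
{
    mutex_lock(&ref->lock);
    if (ref->refc == 0) {            /* already torn down by hotplug */
        mutex_unlock(&ref->lock);
        return;
    }
    if (--ref->refc == 0)
        engine_stop(ref);
    mutex_unlock(&ref->lock);
}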


@@ -44,7 +44,6 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select LOCKDEP_SMALL if LOCKDEP
-	select ARCH_WANT_RELAX_ORDER

 config SPARC32
 	def_bool !64BIT


@@ -176,7 +176,7 @@
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
  * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
- * is just setting the LSB, which makes it an invalid stack address and is also
+ * is just clearing the MSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
  * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
 .macro ENCODE_FRAME_POINTER
 #ifdef CONFIG_FRAME_POINTER
 	mov %esp, %ebp
-	orl $0x1, %ebp
+	andl $0x7fffffff, %ebp
 #endif
 .endm
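Note: on 32-bit, kernel stack addresses have the top bit set, so clearing the MSB yields an impossible stack address while remaining reversible. A C sketch of the encode/decode pair (it mirrors the 32-bit decode_frame_pointer() added in the unwinder hunk later in this commit; illustrative only):

/* Mark a pt_regs address by clearing its MSB; recover it by setting it. */
static unsigned long encode_frame_pointer(unsigned long regs_addr)
{
    return regs_addr & 0x7fffffff;     /* what "andl $0x7fffffff, %ebp" does */
}

static unsigned long decode_frame_pointer(unsigned long bp)
{
    if (bp & 0x80000000)
        return 0;                      /* ordinary frame pointer, not pt_regs */
    return bp | 0x80000000;            /* restore the kernel address */
}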


@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
 		pmus[i].type	= type;
 		pmus[i].boxes	= kzalloc(size, GFP_KERNEL);
 		if (!pmus[i].boxes)
-			return -ENOMEM;
+			goto err;
 	}

 	type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
 		attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
 					sizeof(*attr_group), GFP_KERNEL);
 		if (!attr_group)
-			return -ENOMEM;
+			goto err;

 		attrs = (struct attribute **)(attr_group + 1);
 		attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
 	}

 	type->pmu_group = &uncore_pmu_attr_group;
+
 	return 0;
+
+err:
+	for (i = 0; i < type->num_boxes; i++)
+		kfree(pmus[i].boxes);
+	kfree(pmus);
+
+	return -ENOMEM;
 }

 static int __init
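Note: the switch from bare "return -ENOMEM" to "goto err" is the usual single-exit cleanup pattern: on failure, everything allocated so far must be released. A self-contained user-space sketch of the shape:

#include <stdlib.h>

static int init_boxes(void **slots, int n, size_t size)
{
    int i;

    for (i = 0; i < n; i++) {
        slots[i] = calloc(1, size);
        if (!slots[i])
            goto err;
    }
    return 0;

err:
    while (i--)            /* release only what was allocated */
        free(slots[i]);
    return -1;
}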


@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
 u32 *hv_vp_index;
 EXPORT_SYMBOL_GPL(hv_vp_index);

+u32 hv_max_vp_index;
+
 static int hv_cpu_init(unsigned int cpu)
 {
 	u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)

 	hv_vp_index[smp_processor_id()] = msr_vp_index;

+	if (msr_vp_index > hv_max_vp_index)
+		hv_max_vp_index = msr_vp_index;
+
 	return 0;
 }


@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
 /* Each gva in gva_list encodes up to 4096 pages to flush */
 #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)

-static struct hv_flush_pcpu __percpu *pcpu_flush;
+static struct hv_flush_pcpu __percpu **pcpu_flush;

-static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;

 /*
  * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 {
 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

+	/* valid_bank_mask can represent up to 64 banks */
+	if (hv_max_vp_index / 64 >= 64)
+		return 0;
+
+	/*
+	 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
+	 * structs are not cleared between calls, we risk flushing unneeded
+	 * vCPUs otherwise.
+	 */
+	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+		flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
+
 	/*
 	 * Some banks may end up being empty but this is acceptable.
 	 */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
 		vcpu = hv_cpu_number_to_vp_number(cpu);
 		vcpu_bank = vcpu / 64;
 		vcpu_offset = vcpu % 64;
-
-		/* valid_bank_mask can represent up to 64 banks */
-		if (vcpu_bank >= 64)
-			return 0;
-
 		__set_bit(vcpu_offset, (unsigned long *)
 			  &flush->hv_vp_set.bank_contents[vcpu_bank]);
 		if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
 			const struct flush_tlb_info *info)
 {
 	int cpu, vcpu, gva_n, max_gvas;
+	struct hv_flush_pcpu **flush_pcpu;
 	struct hv_flush_pcpu *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,

 	local_irq_save(flags);

-	flush = this_cpu_ptr(pcpu_flush);
+	flush_pcpu = this_cpu_ptr(pcpu_flush);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}

 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 			const struct flush_tlb_info *info)
 {
 	int nr_bank = 0, max_gvas, gva_n;
+	struct hv_flush_pcpu_ex **flush_pcpu;
 	struct hv_flush_pcpu_ex *flush;
 	u64 status = U64_MAX;
 	unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,

 	local_irq_save(flags);

-	flush = this_cpu_ptr(pcpu_flush_ex);
+	flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
+
+	if (unlikely(!*flush_pcpu))
+		*flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
+
+	flush = *flush_pcpu;
+
+	if (unlikely(!flush)) {
+		local_irq_restore(flags);
+		goto do_native;
+	}

 	if (info->mm) {
 		flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
 			flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else if (info->end &&
 		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
-			0, nr_bank + 2, flush, NULL);
+			0, nr_bank, flush, NULL);
 	} else {
 		gva_n = fill_gva_list(flush->gva_list, nr_bank,
 				      info->start, info->end);
 		status = hv_do_rep_hypercall(
 			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
-			gva_n, nr_bank + 2, flush, NULL);
+			gva_n, nr_bank, flush, NULL);
 	}

 	local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
 		return;

 	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
-		pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
 	else
-		pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+		pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
 }
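Note: the bank arithmetic above packs VP indices into 64-bit words: vcpu / 64 selects the bank, vcpu % 64 the bit. Because the per-cpu flush structures are now reused across calls, stale bits must be cleared up to the highest possible bank (tracked via hv_max_vp_index from the earlier hunk) before setting new ones. A standalone C sketch of the same logic (function and parameter names are hypothetical):

#include <stdint.h>

static int fill_vp_set(uint64_t *banks, const int *vps, int n, uint32_t max_vp)
{
    uint32_t b;
    int i, nr_bank = 1;

    if (max_vp / 64 >= 64)          /* valid_bank_mask covers 64 banks */
        return 0;

    for (b = 0; b <= max_vp / 64; b++)
        banks[b] = 0;               /* clear bits left over from last call */

    for (i = 0; i < n; i++) {
        int bank = vps[i] / 64, bit = vps[i] % 64;

        banks[bank] |= 1ULL << bit;
        if (bank >= nr_bank)
            nr_bank = bank + 1;
    }
    return nr_bank;
}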


@@ -62,8 +62,10 @@
 #define new_len2		145f-144f

 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
 #define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))


@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
 	alt_end_marker ":\n"

 /*
- * max without conditionals. Idea adapted from:
+ * gas compatible max based on the idea from:
  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
  *
- * The additional "-" is needed because gas works with s32s.
+ * The additional "-" is needed because gas uses a "true" value of -1.
  */
-#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"

 /*
  * Pad the second replacement alternative with additional NOPs if it is
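Note: both headers now carry the same branchless max. The idea: -(a < b) builds an all-ones mask exactly when a < b, and a ^ ((a ^ b) & mask) selects b through that mask. In C a comparison yields 0 or 1, so one negation suffices; gas evaluates a true comparison to -1, which is why the assembler expression needs the extra leading "-". A worked user-space example:

#include <stdio.h>

static long max_branchless(long a, long b)
{
    long mask = -(a < b);          /* 0 when a >= b, ~0 when a < b */
    return a ^ ((a ^ b) & mask);   /* a if mask == 0, b if mask == ~0 */
}

int main(void)
{
    printf("%ld %ld\n", max_branchless(3, 7), max_branchless(7, 3)); /* 7 7 */
    return 0;
}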


@@ -187,7 +187,6 @@ struct mca_msr_regs {

 extern struct mce_vendor_flags mce_flags;

-extern struct mca_config mca_cfg;
 extern struct mca_msr_regs msr_ops;

 enum mce_notifier_prios {


@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
 	DEBUG_LOCKS_WARN_ON(preemptible());
 }

-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_clear_cpu(cpu, mm_cpumask(mm));
-}
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)


@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
  * to this information.
  */
 extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;

 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.


@@ -82,6 +82,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif

+/*
+ * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
+ * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
+ * it's false, then we immediately switch CR3 when entering a kernel thread.
+ */
+DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
  * two cache lines.
@@ -104,6 +111,23 @@ struct tlb_state {
 	u16 loaded_mm_asid;
 	u16 next_asid;

+	/*
+	 * We can be in one of several states:
+	 *
+	 *  - Actively using an mm. Our CPU's bit will be set in
+	 *    mm_cpumask(loaded_mm) and is_lazy == false;
+	 *
+	 *  - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
+	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+	 *
+	 *  - Lazily using a real mm. loaded_mm != &init_mm, our bit
+	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
+	 *    We're heuristically guessing that the CR3 load we
+	 *    skipped more than makes up for the overhead added by
+	 *    lazy mode.
+	 */
+	bool is_lazy;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
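Note: a compact classifier for the three states the new comment describes (illustrative sketch only; the boolean inputs stand in for the real loaded_mm/mm_cpumask/is_lazy bookkeeping):

enum tlb_cpu_state { ACTIVE_MM, KERNEL_MM, LAZY_MM };

static enum tlb_cpu_state classify(int loaded_mm_is_init_mm, int is_lazy)
{
    if (loaded_mm_is_init_mm)
        return KERNEL_MM;   /* bit clear in mm_cpumask(&init_mm), not lazy */
    return is_lazy ? LAZY_MM    /* bit set, CR3 load was skipped */
                   : ACTIVE_MM; /* bit set, mm actively in use */
}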


@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
 	return ~0U;
 }

+static u32 skx_deadline_rev(void)
+{
+	switch (boot_cpu_data.x86_mask) {
+	case 0x03: return 0x01000136;
+	case 0x04: return 0x02000014;
+	}
+
+	return ~0U;
+}
+
 static const struct x86_cpu_id deadline_match[] = {
 	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,	hsx_deadline_rev),
 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,	0x0b000020),
 	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D,	bdx_deadline_rev),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,	0x02000014),
+	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X,	skx_deadline_rev),

 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,	0x22),
 	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,	0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
 	const struct x86_cpu_id *m;
 	u32 rev;

-	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+	    boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return;

 	m = x86_match_cpu(deadline_match);


@@ -1,3 +1,6 @@
+#ifndef __X86_MCE_INTERNAL_H__
+#define __X86_MCE_INTERNAL_H__
+
 #include <linux/device.h>
 #include <asm/mce.h>
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void)	{ }
 static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
 #endif
+
+extern struct mca_config mca_cfg;
+
+#endif /* __X86_MCE_INTERNAL_H__ */


@@ -28,6 +28,8 @@
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>

+#include "mce-internal.h"
+
 #define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000


@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
 	bool *res = &dis_ucode_ldr;
 #endif

-	if (!have_cpuid_p())
-		return *res;
-
 	/*
 	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
 	 * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
 void __init load_ucode_bsp(void)
 {
 	unsigned int cpuid_1_eax;
+	bool intel = true;

-	if (check_loader_disabled_bsp())
+	if (!have_cpuid_p())
 		return;

 	cpuid_1_eax = native_cpuid_eax(1);

 	switch (x86_cpuid_vendor()) {
 	case X86_VENDOR_INTEL:
-		if (x86_family(cpuid_1_eax) >= 6)
-			load_ucode_intel_bsp();
+		if (x86_family(cpuid_1_eax) < 6)
+			return;
 		break;
+
 	case X86_VENDOR_AMD:
-		if (x86_family(cpuid_1_eax) >= 0x10)
-			load_ucode_amd_bsp(cpuid_1_eax);
+		if (x86_family(cpuid_1_eax) < 0x10)
+			return;
+
+		intel = false;
 		break;
+
 	default:
-		break;
+		return;
 	}
+
+	if (check_loader_disabled_bsp())
+		return;
+
+	if (intel)
+		load_ucode_intel_bsp();
+	else
+		load_ucode_amd_bsp(cpuid_1_eax);
 }

 static bool check_loader_disabled_ap(void)


@@ -3,6 +3,15 @@

 /* Kprobes and Optprobes common header */

+#include <asm/asm.h>
+
+#ifdef CONFIG_FRAME_POINTER
+# define SAVE_RBP_STRING "	push %" _ASM_BP "\n" \
+			 "	mov  %" _ASM_SP ", %" _ASM_BP "\n"
+#else
+# define SAVE_RBP_STRING "	push %" _ASM_BP "\n"
+#endif
+
 #ifdef CONFIG_X86_64
 #define SAVE_REGS_STRING			\
 	/* Skip cs, ip, orig_ax. */		\
@@ -17,7 +26,7 @@
 	"	pushq %r10\n"			\
 	"	pushq %r11\n"			\
 	"	pushq %rbx\n"			\
-	"	pushq %rbp\n"			\
+	SAVE_RBP_STRING				\
 	"	pushq %r12\n"			\
 	"	pushq %r13\n"			\
 	"	pushq %r14\n"			\
@@ -48,7 +57,7 @@
 	"	pushl %es\n"			\
 	"	pushl %ds\n"			\
 	"	pushl %eax\n"			\
-	"	pushl %ebp\n"			\
+	SAVE_RBP_STRING				\
 	"	pushl %edi\n"			\
 	"	pushl %esi\n"			\
 	"	pushl %edx\n"			\


@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	 * raw stack chunk with redzones:
 	 */
 	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
-	regs->flags &= ~X86_EFLAGS_IF;
-	trace_hardirqs_off();
 	regs->ip = (unsigned long)(jp->entry);

 	/*


@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
 	load_cr3(initial_page_table);
 #else
 	write_cr3(real_mode_header->trampoline_pgd);
+
+	/* Exiting long mode will fail if CR4.PCIDE is set. */
+	if (static_cpu_has(X86_FEATURE_PCID))
+		cr4_clear_bits(X86_CR4_PCIDE);
 #endif

 	/* Jump to the identity-mapped low memory code */


@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
 			state->stack_info.type, state->stack_info.next_sp,
 			state->stack_mask, state->graph_idx);

-	for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+	for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
+	     sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
 			break;

@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
  * This determines if the frame pointer actually contains an encoded pointer to
  * pt_regs on the stack. See ENCODE_FRAME_POINTER.
  */
+#ifdef CONFIG_X86_64
 static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 {
 	unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)

 	return (struct pt_regs *)(regs & ~0x1);
 }
+#else
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+	unsigned long regs = (unsigned long)bp;
+
+	if (regs & 0x80000000)
+		return NULL;
+
+	return (struct pt_regs *)(regs | 0x80000000);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
+#else
+#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
+#endif

 static bool update_stack_state(struct unwind_state *state,
 			       unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
 	regs = decode_frame_pointer(next_bp);
 	if (regs) {
 		frame = (unsigned long *)regs;
-		len = regs_size(regs);
+		len = KERNEL_REGS_SIZE;
 		state->got_irq = true;
 	} else {
 		frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
 	    frame < prev_frame_end)
 		return false;

+	/*
+	 * On 32-bit with user mode regs, make sure the last two regs are safe
+	 * to access:
+	 */
+	if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
+	    !on_stack(info, frame, len + 2*sizeof(long)))
+		return false;
+
 	/* Move state to the next frame: */
 	if (regs) {
 		state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
 	    state->regs->sp < (unsigned long)task_pt_regs(state->task))
 		goto the_end;

+	/*
+	 * There are some known frame pointer issues on 32-bit. Disable
+	 * unwinder warnings on 32-bit until it gets objtool support.
+	 */
+	if (IS_ENABLED(CONFIG_X86_32))
+		goto the_end;
+
 	if (state->regs) {
 		printk_deferred_once(KERN_WARNING
 			"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",


@@ -3973,13 +3973,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 static inline bool is_last_gpte(struct kvm_mmu *mmu,
 				unsigned level, unsigned gpte)
 {
-	/*
-	 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-	 */
-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
 	/*
 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
 	 * If it is clear, there are no large pages at this level, so clear
@@ -3987,6 +3980,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
 	 */
 	gpte &= level - mmu->last_nonleaf_level;

+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
 	return gpte & PT_PAGE_SIZE_MASK;
 }

@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,

 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
+	update_last_nonleaf_level(vcpu, context);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
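Note: the reorder matters because both subtractions communicate through bit 7 (PT_PAGE_SIZE_MASK). The AND against level - last_nonleaf_level must run first; only then may the OR force bit 7 for level-1 entries, since with the old order a level-1 entry lost the forced bit whenever level >= last_nonleaf_level. A worked user-space example of the arithmetic (constants assumed: PT_PAGE_TABLE_LEVEL == 1, PT_PAGE_SIZE_MASK == bit 7):

#include <stdio.h>

#define PT_PAGE_SIZE_MASK (1u << 7)

static int is_last_gpte(unsigned level, unsigned last_nonleaf, unsigned gpte)
{
    gpte &= level - last_nonleaf;  /* clears bit 7 unless level < last_nonleaf */
    gpte |= level - 1 - 1;         /* 0xffffffff (bit 7 set) iff level == 1 */
    return !!(gpte & PT_PAGE_SIZE_MASK);
}

int main(void)
{
    /* A 4K leaf at level 1 terminates even when last_nonleaf == 1: */
    printf("%d\n", is_last_gpte(1, 1, 0));   /* 1 (old order yielded 0) */
    /* A level-2 entry without the PS bit does not: */
    printf("%d\n", is_last_gpte(2, 3, 0));   /* 0 */
    return 0;
}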


@@ -334,10 +334,11 @@ retry_walk:
 		--walker->level;

 		index = PT_INDEX(addr, walker->level);
 		table_gfn = gpte_to_gfn(pte);
 		offset    = index * sizeof(pt_element_t);
 		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
+		BUG_ON(walker->level < 1);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;


@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,

 	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
+	vmx_set_cr4(vcpu, vmcs12->host_cr4);

 	nested_ept_uninit_mmu_context(vcpu);


@@ -1,5 +1,12 @@
-# Kernel does not boot with instrumentation of tlb.c.
-KCOV_INSTRUMENT_tlb.o := n
+# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
+KCOV_INSTRUMENT_tlb.o		:= n
+KCOV_INSTRUMENT_mem_encrypt.o	:= n
+
+KASAN_SANITIZE_mem_encrypt.o	:= n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_mem_encrypt.o	= -pg
+endif

 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o


@@ -30,6 +30,8 @@

 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);

+DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 			    u16 *new_asid, bool *need_flush)
 {
@@ -80,7 +82,7 @@ void leave_mm(int cpu)
 		return;

 	/* Warn if we're not lazy. */
-	WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
+	WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));

 	switch_mm(NULL, &init_mm, NULL);
 }
@@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			__flush_tlb_all();
 	}
 #endif
+	this_cpu_write(cpu_tlbstate.is_lazy, false);

 	if (real_prev == next) {
 		VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
 			  next->context.ctx_id);

-		if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
-			/*
-			 * There's nothing to do: we weren't lazy, and we
-			 * aren't changing our mm. We don't need to flush
-			 * anything, nor do we need to update CR3, CR4, or
-			 * LDTR.
-			 */
-			return;
-		}
-
-		/* Resume remote flushes and then read tlb_gen. */
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
-
-		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
-		    next_tlb_gen) {
-			/*
-			 * Ideally, we'd have a flush_tlb() variant that
-			 * takes the known CR3 value as input. This would
-			 * be faster on Xen PV and on hypothetical CPUs
-			 * on which INVPCID is fast.
-			 */
-			this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
-				       next_tlb_gen);
-			write_cr3(build_cr3(next, prev_asid));
-			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-					TLB_FLUSH_ALL);
-		}
-
 		/*
-		 * We just exited lazy mode, which means that CR4 and/or LDTR
-		 * may be stale. (Changes to the required CR4 and LDTR states
-		 * are not reflected in tlb_gen.)
+		 * We don't currently support having a real mm loaded without
+		 * our cpu set in mm_cpumask(). We have all the bookkeeping
+		 * in place to figure out whether we would need to flush
+		 * if our cpu were cleared in mm_cpumask(), but we don't
+		 * currently use it.
 		 */
+		if (WARN_ON_ONCE(real_prev != &init_mm &&
+				 !cpumask_test_cpu(cpu, mm_cpumask(next))))
+			cpumask_set_cpu(cpu, mm_cpumask(next));
+
+		return;
 	} else {
 		u16 new_asid;
 		bool need_flush;
@@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		}

 		/* Stop remote flushes for the previous mm */
-		if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
-			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
-
-		VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+		VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+				real_prev != &init_mm);
+		cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

 		/*
 		 * Start remote flushes and then read tlb_gen.
@@ -232,6 +212,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	switch_ldt(real_prev, next);
 }

+/*
+ * enter_lazy_tlb() is a hint from the scheduler that we are entering a
+ * kernel thread or other context without an mm. Acceptable implementations
+ * include doing nothing whatsoever, switching to init_mm, or various clever
+ * lazy tricks to try to minimize TLB flushes.
+ *
+ * The scheduler reserves the right to call enter_lazy_tlb() several times
+ * in a row. It will notify us that we're going back to a real mm by
+ * calling switch_mm_irqs_off().
+ */
+void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
+		return;
+
+	if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+		/*
+		 * There's a significant optimization that may be possible
+		 * here. We have accurate enough TLB flush tracking that we
+		 * don't need to maintain coherence of TLB per se when we're
+		 * lazy. We do, however, need to maintain coherence of
+		 * paging-structure caches. We could, in principle, leave our
+		 * old mm loaded and only switch to init_mm when
+		 * tlb_remove_page() happens.
+		 */
+		this_cpu_write(cpu_tlbstate.is_lazy, true);
+	} else {
+		switch_mm(NULL, &init_mm, NULL);
+	}
+}
+
 /*
  * Call this when reinitializing a CPU. It fixes the following potential
  * problems:
@@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());

+	if (unlikely(loaded_mm == &init_mm))
+		return;
+
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);

-	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+	if (this_cpu_read(cpu_tlbstate.is_lazy)) {
 		/*
-		 * We're in lazy mode -- don't flush. We can get here on
-		 * remote flushes due to races and on local flushes if a
-		 * kernel thread coincidentally flushes the mm it's lazily
-		 * still using.
+		 * We're in lazy mode. We need to at least flush our
+		 * paging-structure cache to avoid speculatively reading
+		 * garbage into our TLB. Since switching to init_mm is barely
+		 * slower than a minimal flush, just switch to init_mm.
 		 */
+		switch_mm_irqs_off(NULL, &init_mm, NULL);
 		return;
 	}
@@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
+
+static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	char buf[2];
+
+	buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+	buf[1] = '\n';
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t tlblazy_write_file(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	bool val;
+
+	if (kstrtobool_from_user(user_buf, count, &val))
+		return -EINVAL;
+
+	if (val)
+		static_branch_enable(&tlb_use_lazy_mode);
+	else
+		static_branch_disable(&tlb_use_lazy_mode);
+
+	return count;
+}
+
+static const struct file_operations fops_tlblazy = {
+	.read = tlblazy_read_file,
+	.write = tlblazy_write_file,
+	.llseek = default_llseek,
+};
+
+static int __init init_tlb_use_lazy_mode(void)
+{
+	if (boot_cpu_has(X86_FEATURE_PCID)) {
+		/*
+		 * Heuristic: with PCID on, switching to and from
+		 * init_mm is reasonably fast, but remote flush IPIs
+		 * as expensive as ever, so turn off lazy TLB mode.
+		 *
+		 * We can't do this in setup_pcid() because static keys
+		 * haven't been initialized yet, and it would blow up
+		 * badly.
+		 */
+		static_branch_disable(&tlb_use_lazy_mode);
+	}
+
+	debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+			    arch_debugfs_dir, NULL, &fops_tlblazy);
+	return 0;
+}
+late_initcall(init_tlb_use_lazy_mode);


@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
 	int rc;

 	rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
-				       "x86/xen/hvm_guest:prepare",
+				       "x86/xen/guest:prepare",
 				       cpu_up_prepare_cb, cpu_dead_cb);
 	if (rc >= 0) {
 		rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					       "x86/xen/hvm_guest:online",
+					       "x86/xen/guest:online",
 					       xen_cpu_up_online, NULL);
 		if (rc < 0)
 			cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);


@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	 */
 	bmd->is_our_pages = map_data ? 0 : 1;
 	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
-	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
-			iter->nr_segs, iter->count);
+	bmd->iter = *iter;
+	bmd->iter.iov = bmd->iov;

 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	int ret, offset;
 	struct iov_iter i;
 	struct iovec iov;
+	struct bio_vec *bvec;

 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				(iter->type & WRITE) != WRITE,
 				&pages[cur_page]);
-		if (ret < local_nr_pages) {
+		if (unlikely(ret < local_nr_pages)) {
+			for (j = cur_page; j < page_limit; j++) {
+				if (!pages[j])
+					break;
+				put_page(pages[j]);
+			}
 			ret = -EFAULT;
 			goto out_unmap;
 		}
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		offset = offset_in_page(uaddr);
 		for (j = cur_page; j < page_limit; j++) {
 			unsigned int bytes = PAGE_SIZE - offset;
+			unsigned short prev_bi_vcnt = bio->bi_vcnt;

 			if (len <= 0)
 				break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 					    bytes)
 				break;

+			/*
+			 * check if vector was merged with previous
+			 * drop page reference if needed
+			 */
+			if (bio->bi_vcnt == prev_bi_vcnt)
+				put_page(pages[j]);
+
 			len -= bytes;
 			offset = 0;
 		}
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	return bio;

  out_unmap:
-	for (j = 0; j < nr_pages; j++) {
-		if (!pages[j])
-			break;
-		put_page(pages[j]);
+	bio_for_each_segment_all(bvec, bio, j) {
+		put_page(bvec->bv_page);
 	}
  out:
 	kfree(pages);
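Note: two distinct fixes here. bio_copy_user_iov() now duplicates the whole iov_iter by struct copy and only repoints its iov member at the private array, preserving in-flight iteration state (such as iov_offset) that re-initializing would reset; bio_map_user_iov() drops the extra page reference when a page gets merged into the previous segment, so the bvec-based cleanup releases exactly one reference per segment. A user-space analogy of the iterator-copy pattern (types are hypothetical stand-ins):

#include <stdio.h>
#include <string.h>

struct vec  { const char *base; size_t len; };
struct iter { struct vec *v; size_t nvec; size_t offset; }; /* offset = mid-iteration state */

int main(void)
{
    struct vec user_v[2] = { { "abc", 3 }, { "def", 3 } };
    struct iter src = { user_v, 2, 1 };

    struct vec copy_v[2];
    struct iter dup;

    memcpy(copy_v, src.v, sizeof(copy_v));
    dup = src;        /* copy every field, including offset */
    dup.v = copy_v;   /* ...then iterate over our own array */

    printf("offset preserved: %zu\n", dup.offset);  /* prints 1 */
    return 0;
}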


@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
 	int err;

 	absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
-	buffer = kmalloc(absize, GFP_KERNEL);
+	buffer = kmalloc(absize, GFP_ATOMIC);
 	if (!buffer)
 		return -ENOMEM;

@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)

 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 {
-	struct scatterlist *sg = req->src;
-	unsigned int offset = sg->offset;
 	unsigned int nbytes = req->nbytes;
+	struct scatterlist *sg;
+	unsigned int offset;
 	int err;

-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
+	if (nbytes &&
+	    (sg = req->src, offset = sg->offset,
+	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
 		void *data;

 		data = kmap_atomic(sg_page(sg));


@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)

 static int skcipher_walk_first(struct skcipher_walk *walk)
 {
-	walk->nbytes = 0;
-
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;

-	if (unlikely(!walk->total))
-		return 0;
-
 	walk->buffer = NULL;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

+	walk->total = req->cryptlen;
+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);

-	walk->total = req->cryptlen;
 	walk->iv = req->iv;
 	walk->oiv = req->iv;

@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int err;

+	walk->nbytes = 0;
+
+	if (unlikely(!walk->total))
+		return 0;
+
 	walk->flags &= ~SKCIPHER_WALK_PHYS;

 	scatterwalk_start(&walk->in, req->src);


@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 		ctx->name[len - 1] = 0;

 		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
-			return -ENAMETOOLONG;
+			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
+			err = -ENAMETOOLONG;
+			goto err_drop_spawn;
+		}
 	} else
 		goto err_drop_spawn;

View file

@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
* } * }
* } * }
* *
* Calling this function with index %2 returns %-ENOENT and with index %3 * Calling this function with index %2 or index %3 returns %-ENOENT. If the
* returns the last entry. If the property does not contain any more values * property does not contain any more values, %-ENOENT is returned. The NULL
* %-ENODATA is returned. The NULL entry must be a single integer and * entry must be a single integer and preferably contain value %0.
* preferably contain value %0.
* *
* Return: %0 on success, negative error code on failure. * Return: %0 on success, negative error code on failure.
*/ */
@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
data = acpi_device_data_of_node(fwnode); data = acpi_device_data_of_node(fwnode);
if (!data) if (!data)
return -EINVAL; return -ENOENT;
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret) if (ret)
return ret; return ret == -EINVAL ? -ENOENT : -EINVAL;
/* /*
* The simplest case is when the value is a single reference. Just * The simplest case is when the value is a single reference. Just
@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(obj->reference.handle, &device); ret = acpi_bus_get_device(obj->reference.handle, &device);
if (ret) if (ret)
return ret; return ret == -ENODEV ? -EINVAL : ret;
args->adev = device; args->adev = device;
args->nargs = 0; args->nargs = 0;
@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
* The index argument is then used to determine which reference * The index argument is then used to determine which reference
* the caller wants (along with the arguments). * the caller wants (along with the arguments).
*/ */
if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) if (obj->type != ACPI_TYPE_PACKAGE)
return -EPROTO; return -EINVAL;
if (index >= obj->package.count)
return -ENOENT;
element = obj->package.elements; element = obj->package.elements;
end = element + obj->package.count; end = element + obj->package.count;
@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
ret = acpi_bus_get_device(element->reference.handle, ret = acpi_bus_get_device(element->reference.handle,
&device); &device);
if (ret) if (ret)
return -ENODEV; return -EINVAL;
nargs = 0; nargs = 0;
element++; element++;
@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
else if (type == ACPI_TYPE_LOCAL_REFERENCE) else if (type == ACPI_TYPE_LOCAL_REFERENCE)
break; break;
else else
return -EPROTO; return -EINVAL;
} }
if (nargs > MAX_ACPI_REFERENCE_ARGS) if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO; return -EINVAL;
if (idx == index) { if (idx == index) {
args->adev = device; args->adev = device;
@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT; return -ENOENT;
element++; element++;
} else { } else {
return -EPROTO; return -EINVAL;
} }
idx++; idx++;
} }
return -ENODATA; return -ENOENT;
} }
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
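[Editor's note: taken together, these hunks tighten the error contract: %-ENOENT now uniformly means the reference is absent or the index is out of range, while %-EINVAL indicates malformed property data. A minimal caller sketch under that contract, assuming the 4.14-era __acpi_node_get_property_reference() signature ("example-prop" is a made-up property name):]

static int example_walk_refs(const struct fwnode_handle *fwnode)
{
	struct acpi_reference_args args;
	size_t index;
	int ret;

	for (index = 0; ; index++) {
		ret = __acpi_node_get_property_reference(fwnode, "example-prop",
						index, MAX_ACPI_REFERENCE_ARGS,
						&args);
		if (ret == -ENOENT)
			break;		/* absent, empty, or past the last entry */
		if (ret)
			return ret;	/* -EINVAL: malformed property data */
		/* ... use args.adev and args.args[0 .. args.nargs - 1] ... */
	}
	return 0;
}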

View file

@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true; return true;
} }
/**
* binder_get_node_refs_for_txn() - Get required refs on node for txn
* @node: struct binder_node for which to get refs
* @procp: returns @node->proc if valid
* @error: if no @procp then returns BR_DEAD_REPLY
*
* User-space normally keeps the node alive when creating a transaction
* since it has a reference to the target. The local strong ref keeps it
* alive if the sending process dies before the target process processes
* the transaction. If the source process is malicious or has a reference
* counting bug, relying on the local strong ref can fail.
*
* Since user-space can cause the local strong ref to go away, we also take
* a tmpref on the node to ensure it survives while we are constructing
* the transaction. We also need a tmpref on the proc while we are
* constructing the transaction, so we take that here as well.
*
* Return: The target_node with refs taken, or NULL if @node->proc is NULL.
* Also sets @procp if valid. If @node->proc is NULL, indicating that the
* target proc has died, @error is set to BR_DEAD_REPLY.
*/
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
struct binder_proc **procp,
uint32_t *error)
{
struct binder_node *target_node = NULL;
binder_node_inner_lock(node);
if (node->proc) {
target_node = node;
binder_inc_node_nilocked(node, 1, 0, NULL);
binder_inc_node_tmpref_ilocked(node);
node->proc->tmp_ref++;
*procp = node->proc;
} else
*error = BR_DEAD_REPLY;
binder_node_inner_unlock(node);
return target_node;
}
static void binder_transaction(struct binder_proc *proc, static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread, struct binder_thread *thread,
struct binder_transaction_data *tr, int reply, struct binder_transaction_data *tr, int reply,
@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle, ref = binder_get_ref_olocked(proc, tr->target.handle,
true); true);
if (ref) { if (ref) {
binder_inc_node(ref->node, 1, 0, NULL); target_node = binder_get_node_refs_for_txn(
target_node = ref->node; ref->node, &target_proc,
&return_error);
} else {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
} }
binder_proc_unlock(proc); binder_proc_unlock(proc);
if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
} else { } else {
mutex_lock(&context->context_mgr_node_lock); mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node; target_node = context->binder_context_mgr_node;
if (target_node == NULL) { if (target_node)
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY; return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock); mutex_unlock(&context->context_mgr_node_lock);
} }
e->to_node = target_node->debug_id; if (!target_node) {
binder_node_lock(target_node); /*
target_proc = target_node->proc; * return_error is set above
if (target_proc == NULL) { */
binder_node_unlock(target_node); return_error_param = -EINVAL;
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__; return_error_line = __LINE__;
goto err_dead_binder; goto err_dead_binder;
} }
binder_inner_proc_lock(target_proc); e->to_node = target_node->debug_id;
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_proc);
binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk, if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) { target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY; return_error = BR_FAILED_REPLY;
@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread) if (target_thread)
binder_thread_dec_tmpref(target_thread); binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc); binder_proc_dec_tmpref(target_proc);
if (target_node)
binder_dec_node_tmpref(target_node);
/* /*
* write barrier to synchronize with initialization * write barrier to synchronize with initialization
* of log entry * of log entry
@ -3090,6 +3126,8 @@ err_bad_parent:
err_copy_data_failed: err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer); trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp); binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
binder_dec_node_tmpref(target_node);
target_node = NULL; target_node = NULL;
t->buffer->transaction = NULL; t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer); binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@ -3104,13 +3142,14 @@ err_bad_call_stack:
err_empty_call_stack: err_empty_call_stack:
err_dead_binder: err_dead_binder:
err_invalid_target_handle: err_invalid_target_handle:
err_no_context_mgr_node:
if (target_thread) if (target_thread)
binder_thread_dec_tmpref(target_thread); binder_thread_dec_tmpref(target_thread);
if (target_proc) if (target_proc)
binder_proc_dec_tmpref(target_proc); binder_proc_dec_tmpref(target_proc);
if (target_node) if (target_node) {
binder_dec_node(target_node, 1, 0); binder_dec_node(target_node, 1, 0);
binder_dec_node_tmpref(target_node);
}
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",

View file

@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{ {
ssize_t n;
cpumask_var_t mask;
struct node *node_dev = to_node(dev); struct node *node_dev = to_node(dev);
const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
return cpumap_print_to_pagebuf(list, buf, mask); if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return 0;
cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
n = cpumap_print_to_pagebuf(list, buf, mask);
free_cpumask_var(mask);
return n;
} }
static inline ssize_t node_read_cpumask(struct device *dev, static inline ssize_t node_read_cpumask(struct device *dev,
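[Editor's note: the detour through alloc_cpumask_var() instead of an on-stack mask is deliberate: with large NR_CPUS configurations struct cpumask can be too big for a kernel stack frame, so the allocate, cpumask_and() against cpu_online_mask, print, free sequence used here is the standard pattern for derived masks.]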

View file

@ -21,6 +21,7 @@
#include <linux/phy.h> #include <linux/phy.h>
struct property_set { struct property_set {
struct device *dev;
struct fwnode_handle fwnode; struct fwnode_handle fwnode;
const struct property_entry *properties; const struct property_entry *properties;
}; };
@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
* Caller is responsible for calling fwnode_handle_put() on the returned * Caller is responsible for calling fwnode_handle_put() on the returned
* args->fwnode pointer. * args->fwnode pointer.
* *
* Returns: %0 on success
* %-ENOENT when the index is out of bounds, the index has an empty
* reference or the property was not found
* %-EINVAL on parse error
*/ */
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop, const char *prop, const char *nargs_prop,
@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
void device_remove_properties(struct device *dev) void device_remove_properties(struct device *dev)
{ {
struct fwnode_handle *fwnode; struct fwnode_handle *fwnode;
struct property_set *pset;
fwnode = dev_fwnode(dev); fwnode = dev_fwnode(dev);
if (!fwnode) if (!fwnode)
@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary * the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset. * will hold the pset.
*/ */
if (is_pset_node(fwnode)) { pset = to_pset_node(fwnode);
if (pset) {
set_primary_fwnode(dev, NULL); set_primary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
} else { } else {
fwnode = fwnode->secondary; pset = to_pset_node(fwnode->secondary);
if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { if (pset && dev == pset->dev)
set_secondary_fwnode(dev, NULL); set_secondary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
}
} }
if (pset && dev == pset->dev)
pset_free_set(pset);
} }
EXPORT_SYMBOL_GPL(device_remove_properties); EXPORT_SYMBOL_GPL(device_remove_properties);
@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
p->fwnode.ops = &pset_fwnode_ops; p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode); set_secondary_fwnode(dev, &p->fwnode);
p->dev = dev;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(device_add_properties); EXPORT_SYMBOL_GPL(device_add_properties);

View file

@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
/* The crypto framework makes it hard to avoid this global. */ /* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev; static struct device *artpec6_crypto_dev;
static struct dentry *dbgfs_root;
#ifdef CONFIG_FAULT_INJECTION #ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
char *desc; char *desc;
}; };
static struct dentry *dbgfs_root;
static void artpec6_crypto_init_debugfs(void) static void artpec6_crypto_init_debugfs(void)
{ {
dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

View file

@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{ {
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct scatterlist sg[1], *tsg; struct scatterlist sg[1], *tsg;
int err = 0, len = 0, reg, ncp; int err = 0, len = 0, reg, ncp = 0;
unsigned int i; unsigned int i;
const u32 *buffer = (const u32 *)rctx->buffer; u32 *buffer = (void *)rctx->buffer;
rctx->sg = hdev->req->src; rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes; rctx->total = hdev->req->nbytes;
@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
reg |= HASH_CR_DMAA; reg |= HASH_CR_DMAA;
stm32_hash_write(hdev, HASH_CR, reg); stm32_hash_write(hdev, HASH_CR, reg);
for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) if (ncp) {
stm32_hash_write(hdev, HASH_DIN, buffer[i]); memset(buffer + ncp, 0,
DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
stm32_hash_set_nblw(hdev, ncp); writesl(hdev->io_base + HASH_DIN, buffer,
DIV_ROUND_UP(ncp, sizeof(u32)));
}
stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
reg = stm32_hash_read(hdev, HASH_STR); reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL; reg |= HASH_STR_DCAL;
stm32_hash_write(hdev, HASH_STR, reg); stm32_hash_write(hdev, HASH_STR, reg);

View file

@ -383,7 +383,7 @@ err_put_fd:
return err; return err;
} }
static void sync_fill_fence_info(struct dma_fence *fence, static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info) struct sync_fence_info *info)
{ {
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
ktime_to_ns(fence->timestamp) : ktime_to_ns(fence->timestamp) :
ktime_set(0, 0); ktime_set(0, 0);
return info->status;
} }
static long sync_file_ioctl_fence_info(struct sync_file *sync_file, static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
* sync_fence_info and return the actual number of fences on * sync_fence_info and return the actual number of fences on
* info->num_fences. * info->num_fences.
*/ */
if (!info.num_fences) if (!info.num_fences) {
info.status = dma_fence_is_signaled(sync_file->fence);
goto no_fences; goto no_fences;
} else {
info.status = 1;
}
if (info.num_fences < num_fences) if (info.num_fences < num_fences)
return -EINVAL; return -EINVAL;
@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
if (!fence_info) if (!fence_info)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < num_fences; i++) for (i = 0; i < num_fences; i++) {
sync_fill_fence_info(fences[i], &fence_info[i]); int status = sync_fill_fence_info(fences[i], &fence_info[i]);
info.status = info.status <= 0 ? info.status : status;
}
if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
size)) { size)) {
@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
no_fences: no_fences:
sync_file_get_name(sync_file, info.name, sizeof(info.name)); sync_file_get_name(sync_file, info.name, sizeof(info.name));
info.status = dma_fence_is_signaled(sync_file->fence);
info.num_fences = num_fences; info.num_fences = num_fences;
if (copy_to_user((void __user *)arg, &info, sizeof(info))) if (copy_to_user((void __user *)arg, &info, sizeof(info)))

View file

@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{ {
struct msgdma_sw_desc *desc; struct msgdma_sw_desc *desc;
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node); list_del(&desc->node);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list); INIT_LIST_HEAD(&desc->tx_list);
@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan); struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new; struct msgdma_sw_desc *new;
dma_cookie_t cookie; dma_cookie_t cookie;
unsigned long flags;
new = tx_to_desc(tx); new = tx_to_desc(tx);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx); cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list); list_add_tail(&new->node, &mdev->pending_list);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
return cookie; return cookie;
} }
@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc; struct msgdma_extended_desc *desc;
size_t copy; size_t copy;
u32 desc_cnt; u32 desc_cnt;
unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) { if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL; return NULL;
} }
mdev->desc_free_cnt -= desc_cnt; mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
do { do {
/* Allocate and populate the descriptor */ /* Allocate and populate the descriptor */
@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i; u32 desc_cnt = 0, i;
struct scatterlist *sg; struct scatterlist *sg;
u32 stride; u32 stride;
unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i) for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) { if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL; return NULL;
} }
mdev->desc_free_cnt -= desc_cnt; mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl); avail = sg_dma_len(sgl);
@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan) static void msgdma_issue_pending(struct dma_chan *chan)
{ {
struct msgdma_device *mdev = to_mdev(chan); struct msgdma_device *mdev = to_mdev(chan);
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev); msgdma_start_transfer(mdev);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
} }
/** /**
@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan) static void msgdma_free_chan_resources(struct dma_chan *dchan)
{ {
struct msgdma_device *mdev = to_mdev(dchan); struct msgdma_device *mdev = to_mdev(dchan);
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev); msgdma_free_descriptors(mdev);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq); kfree(mdev->sw_desq);
} }
@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count; u32 count;
u32 __maybe_unused size; u32 __maybe_unused size;
u32 __maybe_unused status; u32 __maybe_unused status;
unsigned long flags;
spin_lock(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */ /* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
* bits. So we need to just drop these values. * bits. So we need to just drop these values.
*/ */
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
msgdma_complete_descriptor(mdev); msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev); msgdma_chan_desc_cleanup(mdev);
} }
spin_unlock(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
} }
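[Editor's note: every conversion in this file applies the same rule; a condensed sketch of the pattern (illustrative, not driver code):]

/* mdev->lock is taken from process context and from the IRQ-driven
 * completion path, so softirq-only protection (_bh) is insufficient:
 * interrupts must be disabled while the lock is held. */
static void example_touch_desc_lists(struct msgdma_device *mdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	/* ... manipulate the free/pending descriptor lists ... */
	spin_unlock_irqrestore(&mdev->lock, flags);
}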
/** /**

View file

@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc; struct edma_desc *edesc;
struct device *dev = chan->device->dev; struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan); struct edma_chan *echan = to_edma_chan(chan);
unsigned int width, pset_len; unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len)) if (unlikely(!echan || !len))
return NULL; return NULL;
/* Align the array size (acnt block) with the transfer properties */
switch (__ffs((src | dest | len))) {
case 0:
array_size = SZ_32K - 1;
break;
case 1:
array_size = SZ_32K - 2;
break;
default:
array_size = SZ_32K - 4;
break;
}
if (len < SZ_64K) { if (len < SZ_64K) {
/* /*
* Transfer size less than 64K can be handled with one paRAM * Transfer size less than 64K can be handled with one paRAM
@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is a multiple of 32767 one slot can be * used to complete the transfer.
* used to complete the transfer. * used to complete the transfer.
*/ */
width = SZ_32K - 1; width = array_size;
pset_len = rounddown(len, width); pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */ /* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len)) if (unlikely(pset_len == len))
@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
} }
dest += pset_len; dest += pset_len;
src += pset_len; src += pset_len;
pset_len = width = len % (SZ_32K - 1); pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM); width, pset_len, DMA_MEM_TO_MEM);
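[Editor's note: as a worked example of the alignment logic added above: if src, dest and len are all even but at least one is only 2-byte aligned, __ffs(src | dest | len) is 1 and array_size becomes SZ_32K - 2 (32766), keeping each acnt block a multiple of the common alignment; fully word-aligned transfers get SZ_32K - 4, while transfers with any odd operand keep the old SZ_32K - 1.]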

View file

@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
mutex_lock(&xbar->mutex); mutex_lock(&xbar->mutex);
map->xbar_out = find_first_zero_bit(xbar->dma_inuse, map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
xbar->dma_requests); xbar->dma_requests);
mutex_unlock(&xbar->mutex);
if (map->xbar_out == xbar->dma_requests) { if (map->xbar_out == xbar->dma_requests) {
mutex_unlock(&xbar->mutex);
dev_err(&pdev->dev, "Run out of free DMA requests\n"); dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map); kfree(map);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
set_bit(map->xbar_out, xbar->dma_inuse); set_bit(map->xbar_out, xbar->dma_inuse);
mutex_unlock(&xbar->mutex);
map->xbar_in = (u16)dma_spec->args[0]; map->xbar_in = (u16)dma_spec->args[0];

View file

@ -453,7 +453,8 @@ config GPIO_TS4800
config GPIO_THUNDERX config GPIO_THUNDERX
tristate "Cavium ThunderX/OCTEON-TX GPIO" tristate "Cavium ThunderX/OCTEON-TX GPIO"
depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY depends on PCI_MSI
select IRQ_DOMAIN_HIERARCHY
select IRQ_FASTEOI_HIERARCHY_HANDLERS select IRQ_FASTEOI_HIERARCHY_HANDLERS
help help
Say yes here to support the on-chip GPIO lines on the ThunderX Say yes here to support the on-chip GPIO lines on the ThunderX

View file

@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq); irq_set_handler_locked(d, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
irq_set_handler_locked(d, handle_edge_irq); /*
* Edge IRQs are already cleared/acked in irq_handler and
* do not need to be masked; as a result, the handle_edge_irq()
* logic is excessive here and may cause loss of interrupts.
* So just use handle_simple_irq.
*/
irq_set_handler_locked(d, handle_simple_irq);
return 0; return 0;
@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{ {
void __iomem *isr_reg = NULL; void __iomem *isr_reg = NULL;
u32 isr; u32 enabled, isr, level_mask;
unsigned int bit; unsigned int bit;
struct gpio_bank *bank = gpiobank; struct gpio_bank *bank = gpiobank;
unsigned long wa_lock_flags; unsigned long wa_lock_flags;
@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
pm_runtime_get_sync(bank->chip.parent); pm_runtime_get_sync(bank->chip.parent);
while (1) { while (1) {
u32 isr_saved, level_mask = 0;
u32 enabled;
raw_spin_lock_irqsave(&bank->lock, lock_flags); raw_spin_lock_irqsave(&bank->lock, lock_flags);
enabled = omap_get_gpio_irqbank_mask(bank); enabled = omap_get_gpio_irqbank_mask(bank);
isr_saved = isr = readl_relaxed(isr_reg) & enabled; isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask) if (bank->level_mask)
level_mask = bank->level_mask & enabled; level_mask = bank->level_mask & enabled;
else
level_mask = 0;
/* clear edge sensitive interrupts before handler(s) are /* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt that occurs while called so that we don't miss any interrupt that occurs while
executing them */ executing them */
omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); if (isr & ~level_mask)
omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); omap_clear_gpio_irqbank(bank, isr & ~level_mask);
omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
raw_spin_unlock_irqrestore(&bank->lock, lock_flags); raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
/*---------------------------------------------------------------------*/ /*---------------------------------------------------------------------*/
static void __init omap_gpio_show_rev(struct gpio_bank *bank) static void omap_gpio_show_rev(struct gpio_bank *bank)
{ {
static bool called; static bool called;
u32 rev; u32 rev;

View file

@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (pin <= 255) { if (pin <= 255) {
char ev_name[5]; char ev_name[5];
sprintf(ev_name, "_%c%02X", sprintf(ev_name, "_%c%02hhX",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin); pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))

View file

@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements; placement.busy_placement = &placements;
placements.fpfn = 0; placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r)) if (unlikely(r))

View file

@ -2960,6 +2960,7 @@ out:
drm_modeset_backoff(&ctx); drm_modeset_backoff(&ctx);
} }
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx); drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx); drm_modeset_acquire_fini(&ctx);

View file

@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request) static void nop_submit_request(struct drm_i915_gem_request *request)
{ {
unsigned long flags;
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO); dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
__i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno); intel_engine_init_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
} }
static void engine_set_wedged(struct intel_engine_cs *engine) static void engine_set_wedged(struct intel_engine_cs *engine)

View file

@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
{ {
enum port port; enum port port;
if (!HAS_DDI(dev_priv)) if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return; return;
if (!dev_priv->vbt.child_dev_num) if (!dev_priv->vbt.child_dev_num)

View file

@ -74,7 +74,7 @@
#define I9XX_CSC_COEFF_1_0 \ #define I9XX_CSC_COEFF_1_0 \
((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
static bool crtc_state_is_legacy(struct drm_crtc_state *state) static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
{ {
return !state->degamma_lut && return !state->degamma_lut &&
!state->ctm && !state->ctm &&
@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
} }
mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
if (!crtc_state_is_legacy(state)) { if (!crtc_state_is_legacy_gamma(state)) {
mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
} }
@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
struct intel_crtc_state *intel_state = to_intel_crtc_state(state); struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe; enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
if (crtc_state_is_legacy(state)) { if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state); haswell_load_luts(state);
return; return;
} }
@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
glk_load_degamma_lut(state); glk_load_degamma_lut(state);
if (crtc_state_is_legacy(state)) { if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state); haswell_load_luts(state);
return; return;
} }
@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
uint32_t i, lut_size; uint32_t i, lut_size;
uint32_t word0, word1; uint32_t word0, word1;
if (crtc_state_is_legacy(state)) { if (crtc_state_is_legacy_gamma(state)) {
/* Turn off degamma/gamma on CGM block. */ /* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe), I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0)); (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
return 0; return 0;
/* /*
* We also allow no degamma lut and a gamma lut at the legacy * We also allow no degamma lut/ctm and a gamma lut at the legacy
* size (256 entries). * size (256 entries).
*/ */
if (!crtc_state->degamma_lut && if (crtc_state_is_legacy_gamma(crtc_state))
crtc_state->gamma_lut &&
crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
return 0; return 0;
return -EINVAL; return -EINVAL;

View file

@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; enum transcoder cpu_transcoder;
struct drm_display_mode *mode; struct drm_display_mode *mode;
struct intel_crtc_state *pipe_config; struct intel_crtc_state *pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder)); u32 htot, hsync, vtot, vsync;
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
int vsync = I915_READ(VSYNC(cpu_transcoder));
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
mode = kzalloc(sizeof(*mode), GFP_KERNEL); mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
i9xx_crtc_clock_get(intel_crtc, pipe_config); i9xx_crtc_clock_get(intel_crtc, pipe_config);
mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
cpu_transcoder = pipe_config->cpu_transcoder;
htot = I915_READ(HTOTAL(cpu_transcoder));
hsync = I915_READ(HSYNC(cpu_transcoder));
vtot = I915_READ(VTOTAL(cpu_transcoder));
vsync = I915_READ(VSYNC(cpu_transcoder));
mode->hdisplay = (htot & 0xffff) + 1; mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1; mode->hsync_start = (hsync & 0xffff) + 1;

View file

@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp); I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg); POSTING_READ(pp_ctrl_reg);
intel_dp->panel_power_off_time = ktime_get_boottime();
wait_panel_off(intel_dp); wait_panel_off(intel_dp);
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */ /* We got a reference when we enabled the VDD. */
intel_display_power_put(dev_priv, intel_dp->aux_power_domain); intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* seems sufficient to avoid this problem. * seems sufficient to avoid this problem.
*/ */
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
vbt.t11_t12); vbt.t11_t12);
} }

View file

@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
{ {
enum i915_power_well_id id = power_well->id; enum i915_power_well_id id = power_well->id;
bool wait_fuses = power_well->hsw.has_fuses; bool wait_fuses = power_well->hsw.has_fuses;
enum skl_power_gate pg; enum skl_power_gate uninitialized_var(pg);
u32 val; u32 val;
if (wait_fuses) { if (wait_fuses) {

View file

@ -248,7 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk); clk_disable_unprepare(ahb_clk);
disable_gdsc: disable_gdsc:
regulator_disable(gdsc_reg); regulator_disable(gdsc_reg);
pm_runtime_put_autosuspend(dev); pm_runtime_put_sync(dev);
put_clk: put_clk:
clk_put(ahb_clk); clk_put(ahb_clk);
put_gdsc: put_gdsc:

View file

@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.caps = MDP_LM_CAP_WB }, .caps = MDP_LM_CAP_WB },
}, },
.nb_stages = 5, .nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
}, },
.dspp = { .dspp = {
.count = 3, .count = 3,

View file

@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
pm_runtime_put_autosuspend(&pdev->dev);
set_cursor: set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) { if (ret) {

View file

@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
struct dma_fence *fence; struct dma_fence *fence;
int i, ret; int i, ret;
if (!exclusive) {
/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
* which makes this a slightly strange place to call it. OTOH this
* is a convenient can-fail point to hook it in. (And similar to
* how etnaviv and nouveau handle this.)
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
fobj = reservation_object_get_list(msm_obj->resv); fobj = reservation_object_get_list(msm_obj->resv);
if (!fobj || (fobj->shared_count == 0)) { if (!fobj || (fobj->shared_count == 0)) {
fence = reservation_object_get_excl(msm_obj->resv); fence = reservation_object_get_excl(msm_obj->resv);
@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
} }
vaddr = msm_gem_get_vaddr(obj); vaddr = msm_gem_get_vaddr(obj);
if (!vaddr) { if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace); msm_gem_put_iova(obj, aspace);
drm_gem_object_unreference(obj); drm_gem_object_unreference(obj);
return ERR_PTR(-ENOMEM); return ERR_CAST(vaddr);
} }
if (bo) if (bo)

View file

@ -221,7 +221,7 @@ fail:
return ret; return ret;
} }
static int submit_fence_sync(struct msm_gem_submit *submit) static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{ {
int i, ret = 0; int i, ret = 0;
@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
if (!write) {
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = reservation_object_reserve_shared(msm_obj->resv);
if (ret)
return ret;
}
if (no_implicit)
continue;
ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
if (ret) if (ret)
break; break;
@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret) if (ret)
goto out; goto out;
if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
ret = submit_fence_sync(submit); if (ret)
if (ret) goto out;
goto out;
}
ret = submit_pin_objects(submit); ret = submit_pin_objects(submit);
if (ret) if (ret)

View file

@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb); msm_ringbuffer_destroy(gpu->rb);
} }
if (gpu->aspace) {
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0); NULL, 0);
msm_gem_address_space_put(gpu->aspace); msm_gem_address_space_put(gpu->aspace);

View file

@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
* than once.
*/
n = min(sz, circ_space_to_end(&rd->fifo)); n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n); memcpy(fptr, ptr, n);
fifo->head = (fifo->head + n) & (BUF_SZ - 1); smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n; sz -= n;
ptr += n; ptr += n;
@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
if (ret) if (ret)
goto out; goto out;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_CNT_TO_END() does not access the head more than
* once.
*/
n = min_t(int, sz, circ_count_to_end(&rd->fifo)); n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) { if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n; *ppos += n;
wake_up_all(&rd->fifo_event); wake_up_all(&rd->fifo_event);
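[Editor's note: a minimal sketch of the producer half of the ordering contract these comments assume, reusing the driver's BUF_SZ convention; illustrative only, with space accounting elided:]

static void example_fifo_push(struct circ_buf *fifo, char c)
{
	int head = fifo->head;

	/* Fill the slot, then publish the new head; smp_store_release()
	 * guarantees the data write is visible before the index update. */
	fifo->buf[head] = c;
	smp_store_release(&fifo->head, (head + 1) & (BUF_SZ - 1));
}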

View file

@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
return -EINVAL; return -EINVAL;
} }
/*
* IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
* i.MX53 channel arbitration locking doesn't seem to work properly.
* Allow enabling the lock feature on IPUv3H / i.MX6 only.
*/
if (bursts && ipu->ipu_type != IPUV3H)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
if (channel->num == idmac_lock_en_info[i].chnum) if (channel->num == idmac_lock_en_info[i].chnum)
break; break;

View file

@ -73,6 +73,14 @@
#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
#define IPU_PRE_STORE_ENG_STATUS 0x120
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
#define IPU_PRE_STORE_ENG_SIZE 0x130 #define IPU_PRE_STORE_ENG_SIZE 0x130
#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@ -93,6 +101,7 @@ struct ipu_pre {
dma_addr_t buffer_paddr; dma_addr_t buffer_paddr;
void *buffer_virt; void *buffer_virt;
bool in_use; bool in_use;
unsigned int safe_window_end;
}; };
static DEFINE_MUTEX(ipu_pre_list_mutex); static DEFINE_MUTEX(ipu_pre_list_mutex);
@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
u32 active_bpp = info->cpp[0] >> 1; u32 active_bpp = info->cpp[0] >> 1;
u32 val; u32 val;
/* calculate safe window for ctrl register updates */
pre->safe_window_end = height - 2;
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
{ {
unsigned long timeout = jiffies + msecs_to_jiffies(5);
unsigned short current_yblock;
u32 val;
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
do {
if (time_after(jiffies, timeout)) {
dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
return;
}
val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
current_yblock =
(val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
} while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
} }
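[Editor's note: the poll spins while the store engine is either still in block row 0 or at/after safe_window_end (height - 2), so the shadow-register update is only triggered from a point where it cannot tear the frame currently being fetched.]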

View file

@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h> #include <drm/drm_fourcc.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h> #include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h> #include <linux/module.h>
@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
val = IPU_PRG_REG_UPDATE_REG_UPDATE; val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE); writel(val, prg->regs + IPU_PRG_REG_UPDATE);
/* wait for both double buffers to be filled */
readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
(val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
(val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
5, 1000);
clk_disable_unprepare(prg->clk_ipg); clk_disable_unprepare(prg->clk_ipg);
chan->enabled = true; chan->enabled = true;

View file

@ -281,6 +281,7 @@ config HID_ELECOM
Support for ELECOM devices: Support for ELECOM devices:
- BM084 Bluetooth Mouse - BM084 Bluetooth Mouse
- DEFT Trackball (Wired and wireless) - DEFT Trackball (Wired and wireless)
- HUGE Trackball (Wired and wireless)
config HID_ELO config HID_ELO
tristate "ELO USB 4000/4500 touchscreen" tristate "ELO USB 4000/4500 touchscreen"

View file

@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
#endif #endif
#if IS_ENABLED(CONFIG_HID_ELO) #if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },

View file

@ -3,6 +3,7 @@
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
*/ */
/* /*
@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
break; break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED: case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
/* The DEFT trackball has eight buttons, but its descriptor only case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
* reports five, disabling the three Fn buttons on the top of case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
* the mouse. /* The DEFT/HUGE trackball has eight buttons, but its descriptor
* only reports five, disabling the three Fn buttons on the top
* of the mouse.
* *
* Apply the following diff to the descriptor: * Apply the following diff to the descriptor:
* *
@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
* End Collection, End Collection, * End Collection, End Collection,
*/ */
if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
rdesc[13] = 8; /* Button/Variable Report Count */ rdesc[13] = 8; /* Button/Variable Report Count */
rdesc[21] = 8; /* Button/Variable Usage Maximum */ rdesc[21] = 8; /* Button/Variable Usage Maximum */
rdesc[29] = 0; /* Button/Constant Report Count */ rdesc[29] = 0; /* Button/Constant Report Count */
@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
{ } { }
}; };
MODULE_DEVICE_TABLE(hid, elecom_devices); MODULE_DEVICE_TABLE(hid, elecom_devices);

View file

@ -368,6 +368,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061 #define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004

View file

@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0; unsigned int rsize = 0;
char *rdesc; char *rdesc;
int ret, n; int ret, n;
int num_descriptors;
size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct)); le16_to_cpu(dev->descriptor.idProduct));
@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV; return -ENODEV;
} }
if (hdesc->bLength < sizeof(struct hid_descriptor)) {
dbg_hid("hid descriptor is too short\n");
return -EINVAL;
}
hid->version = le16_to_cpu(hdesc->bcdHID); hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode; hid->country = hdesc->bCountryCode;
for (n = 0; n < hdesc->bNumDescriptors; n++) num_descriptors = min_t(int, hdesc->bNumDescriptors,
(hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
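[Editor's note: concretely, offsetof(struct hid_descriptor, desc) is 6 and each struct hid_class_descriptor entry is 3 bytes, so a well-formed 9-byte descriptor yields num_descriptors = (9 - 6) / 3 = 1, and a device advertising a larger bNumDescriptors than its bLength can hold can no longer walk the loop past the end of the buffer.]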

View file

@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
*/ */
return; return;
} }
mutex_lock(&vmbus_connection.channel_mutex);
/* /*
* Close all the sub-channels first and then close the * Close all the sub-channels first and then close the
* primary channel. * primary channel.
@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
cur_channel = list_entry(cur, struct vmbus_channel, sc_list); cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel); vmbus_close_internal(cur_channel);
if (cur_channel->rescind) { if (cur_channel->rescind) {
mutex_lock(&vmbus_connection.channel_mutex); hv_process_channel_removal(
hv_process_channel_removal(cur_channel,
cur_channel->offermsg.child_relid); cur_channel->offermsg.child_relid);
mutex_unlock(&vmbus_connection.channel_mutex);
} }
} }
/* /*
* Now close the primary. * Now close the primary.
*/ */
vmbus_close_internal(channel); vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
} }
EXPORT_SYMBOL_GPL(vmbus_close); EXPORT_SYMBOL_GPL(vmbus_close);

View file

@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	channel->rescind = true;
 	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
 				msglistentry) {
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
 			 true);
 }
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
+void hv_process_channel_removal(u32 relid)
 {
 	unsigned long flags;
-	struct vmbus_channel *primary_channel;
+	struct vmbus_channel *primary_channel, *channel;
-	BUG_ON(!channel->rescind);
 	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+	/*
+	 * Make sure channel is valid as we may have raced.
+	 */
+	channel = relid2channel(relid);
+	if (!channel)
+		return;
+
+	BUG_ON(!channel->rescind);
 	if (channel->target_cpu != get_cpu()) {
 		put_cpu();
 		smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	if (!fnew) {
 		if (channel->sc_creation_callback != NULL)
 			channel->sc_creation_callback(newchannel);
+		newchannel->probe_done = true;
 		return;
 	}
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 {
 	struct vmbus_channel_rescind_offer *rescind;
 	struct vmbus_channel *channel;
-	unsigned long flags;
 	struct device *dev;
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		return;
 	}
-	spin_lock_irqsave(&channel->lock, flags);
-	channel->rescind = true;
-	spin_unlock_irqrestore(&channel->lock, flags);
-
-	/*
-	 * Now that we have posted the rescind state, perform
-	 * rescind related cleanup.
-	 */
-	vmbus_rescind_cleanup(channel);
-
 	/*
 	 * Now wait for offer handling to complete.
 	 */
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	if (channel->device_obj) {
 		if (channel->chn_rescind_callback) {
 			channel->chn_rescind_callback(channel);
+			vmbus_rescind_cleanup(channel);
 			return;
 		}
 		/*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		 */
 		dev = get_device(&channel->device_obj->device);
 		if (dev) {
+			vmbus_rescind_cleanup(channel);
 			vmbus_device_unregister(channel->device_obj);
 			put_device(dev);
 		}
@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		 * 1. Close all sub-channels first
 		 * 2. Then close the primary channel.
 		 */
+		mutex_lock(&vmbus_connection.channel_mutex);
+		vmbus_rescind_cleanup(channel);
 		if (channel->state == CHANNEL_OPEN_STATE) {
 			/*
 			 * The channel is currently not open;
 			 * it is safe for us to cleanup the channel.
 			 */
-			mutex_lock(&vmbus_connection.channel_mutex);
-			hv_process_channel_removal(channel,
-					channel->offermsg.child_relid);
-			mutex_unlock(&vmbus_connection.channel_mutex);
+			hv_process_channel_removal(rescind->child_relid);
 		}
+		mutex_unlock(&vmbus_connection.channel_mutex);
 	}
 }

View file

@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
 	struct vmbus_channel *channel = hv_dev->channel;
 	mutex_lock(&vmbus_connection.channel_mutex);
-	hv_process_channel_removal(channel,
-				   channel->offermsg.child_relid);
+	hv_process_channel_removal(channel->offermsg.child_relid);
 	mutex_unlock(&vmbus_connection.channel_mutex);
 	kfree(hv_dev);

View file

@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
 int __init amd_iommu_init_dma_ops(void)
 {
-	swiotlb = iommu_pass_through ? 1 : 0;
+	swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
 	iommu_detected = 1;
 	/*
 	 * In case we don't initialize SWIOTLB (actually the common case
-	 * when AMD IOMMU is enabled), make sure there are global
-	 * dma_ops set as a fall-back for devices not handled by this
-	 * driver (for example non-PCI devices).
+	 * when AMD IOMMU is enabled and SME is not active), make sure there
+	 * are global dma_ops set as a fall-back for devices not handled by
+	 * this driver (for example non-PCI devices). When SME is active,
+	 * make sure that swiotlb variable remains set so the global dma_ops
+	 * continue to be SWIOTLB.
 	 */
 	if (!swiotlb)
 		dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	mutex_unlock(&domain->api_lock);
 	domain_flush_tlb_pde(domain);
+	domain_flush_complete(domain);
 	return unmap_size;
 }

View file

@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
 			   pm_runtime_force_resume)
 };
-static const struct of_device_id sysmmu_of_match[] __initconst = {
+static const struct of_device_id sysmmu_of_match[] = {
 	{ .compatible = "samsung,exynos-sysmmu", },
 	{ },
 };

View file

@@ -128,6 +128,8 @@
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
+
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
 #define MEI_DEV_ID_KBP_2      0xA2BB  /* Kaby Point 2 */

View file

@@ -94,6 +94,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
@@ -227,12 +229,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 	/*
-	 * For not wake-able HW runtime pm framework
-	 * can't be used on pci device level.
-	 * Use domain runtime pm callbacks instead.
-	 */
-	if (!pci_dev_run_wake(pdev))
-		mei_me_set_pm_domain(dev);
+	 * ME maps runtime suspend/resume to D0i states,
+	 * hence we need to go around native PCI runtime service which
+	 * eventually brings the device into D3cold/hot state,
+	 * but the mei device cannot wake up from D3 unlike from D0i3.
+	 * To get around the PCI device native runtime pm,
+	 * ME uses runtime pm domain handlers which take precedence
+	 * over the driver's pm handlers.
+	 */
+	mei_me_set_pm_domain(dev);
 	if (mei_pg_is_enabled(dev))
 		pm_runtime_put_noidle(&pdev->dev);
@@ -272,8 +277,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
 	dev_dbg(&pdev->dev, "shutdown\n");
 	mei_stop(dev);
-	if (!pci_dev_run_wake(pdev))
-		mei_me_unset_pm_domain(dev);
+	mei_me_unset_pm_domain(dev);
 	mei_disable_interrupts(dev);
 	free_irq(pdev->irq, dev);
@@ -301,8 +305,7 @@ static void mei_me_remove(struct pci_dev *pdev)
 	dev_dbg(&pdev->dev, "stop\n");
 	mei_stop(dev);
-	if (!pci_dev_run_wake(pdev))
-		mei_me_unset_pm_domain(dev);
+	mei_me_unset_pm_domain(dev);
 	mei_disable_interrupts(dev);

View file

@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 	/*
-	 * For not wake-able HW runtime pm framework
-	 * can't be used on pci device level.
-	 * Use domain runtime pm callbacks instead.
-	 */
-	if (!pci_dev_run_wake(pdev))
-		mei_txe_set_pm_domain(dev);
+	 * TXE maps runtime suspend/resume to own power gating states,
+	 * hence we need to go around native PCI runtime service which
+	 * eventually brings the device into D3cold/hot state.
+	 * But the TXE device cannot wake up from D3 unlike from own
+	 * power gating. To get around PCI device native runtime pm,
+	 * TXE uses runtime pm domain handlers which take precedence.
+	 */
+	mei_txe_set_pm_domain(dev);
 	pm_runtime_put_noidle(&pdev->dev);
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
 	dev_dbg(&pdev->dev, "shutdown\n");
 	mei_stop(dev);
-	if (!pci_dev_run_wake(pdev))
-		mei_txe_unset_pm_domain(dev);
+	mei_txe_unset_pm_domain(dev);
 	mei_disable_interrupts(dev);
 	free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
 	mei_stop(dev);
-	if (!pci_dev_run_wake(pdev))
-		mei_txe_unset_pm_domain(dev);
+	mei_txe_unset_pm_domain(dev);
 	mei_disable_interrupts(dev);
 	free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
 	else
 		ret = -EAGAIN;
-	/*
-	 * If everything is okay we're about to enter PCI low
-	 * power state (D3) therefor we need to disable the
-	 * interrupts towards host.
-	 * However if device is not wakeable we do not enter
-	 * D-low state and we need to keep the interrupt kicking
-	 */
-	if (!ret && pci_dev_run_wake(pdev))
-		mei_disable_interrupts(dev);
+	/* keep irq on we are staying in D0 */
 	dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);

View file

@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(action);
+		/* fall through */
 	case XDP_ABORTED:
 		trace_xdp_exception(nic->netdev, prog, action);
+		/* fall through */
 	case XDP_DROP:
 		/* Check if it's a recycled page, if not
 		 * unmap the DMA mapping.
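
Note: the two added /* fall through */ comments mark the case fall-through as deliberate, which keeps compilers building with -Wimplicit-fallthrough quiet. A standalone sketch of the idiom, with made-up action codes rather than the real XDP ones:

/*
 * Sketch of the annotated fall-through idiom; hypothetical action
 * codes, not the nicvf driver. GCC recognizes the comment form and
 * suppresses the implicit-fallthrough warning.
 */
#include <stdio.h>

enum action { ACT_PASS, ACT_TX, ACT_ABORTED, ACT_DROP };

static int handle(enum action a)
{
    switch (a) {
    case ACT_PASS:
        return 1;
    default:
        fprintf(stderr, "invalid action %d\n", a);
        /* fall through */
    case ACT_ABORTED:
        fprintf(stderr, "tracing exception\n");
        /* fall through */
    case ACT_DROP:
        return 0;       /* drop the packet */
    }
}

int main(void)
{
    return handle(ACT_TX); /* unknown action: warn, trace, then drop */
}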

View file

@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
-#ifndef CONFIG_SPARC
-	u32 regval;
-	u32 i;
-#endif
 	s32 ret_val;
 	ret_val = ixgbe_start_hw_generic(hw);
-#ifndef CONFIG_SPARC
-	/* Disable relaxed ordering */
-	for (i = 0; ((i < hw->mac.max_tx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-	}
-
-	for (i = 0; ((i < hw->mac.max_rx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	if (ret_val)
 		return ret_val;

View file

@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
 	}
 	IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
-	/* Disable relaxed ordering */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-	}
-
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	return 0;
 }

View file

@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *temp_ring;
-	int i, err = 0;
+	int i, j, err = 0;
 	u32 new_rx_count, new_tx_count;
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	}
 	/* allocate temporary buffer to store rings in */
-	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
-	i = max_t(int, i, adapter->num_xdp_queues);
+	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+		  adapter->num_rx_queues);
 	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
 	if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			}
 		}
-		for (i = 0; i < adapter->num_xdp_queues; i++) {
-			memcpy(&temp_ring[i], adapter->xdp_ring[i],
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+			memcpy(&temp_ring[i], adapter->xdp_ring[j],
 			       sizeof(struct ixgbe_ring));
 			temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(adapter->tx_ring[i], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}
-		for (i = 0; i < adapter->num_xdp_queues; i++) {
-			ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+			ixgbe_free_tx_resources(adapter->xdp_ring[j]);
-			memcpy(adapter->xdp_ring[i], &temp_ring[i],
+			memcpy(adapter->xdp_ring[j], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}

View file

@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 		      IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
 		return;
-	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
 	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 		return ixgbe_ptp_set_ts_config(adapter, req);
 	case SIOCGHWTSTAMP:
 		return ixgbe_ptp_get_ts_config(adapter, req);
+	case SIOCGMIIPHY:
+		if (!adapter->hw.phy.ops.read_reg)
+			return -EOPNOTSUPP;
+		/* fall through */
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}

View file

@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_fib *fib)
 {
-	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
-	struct mlxsw_sp_lpm_tree *lpm_tree;
-
-	/* Aggregate prefix lengths across all virtual routers to make
-	 * sure we only have used prefix lengths in the LPM tree.
-	 */
-	mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-					 fib->proto);
-	if (IS_ERR(lpm_tree))
-		goto err_tree_get;
-	mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
 	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
 		return;
 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);

View file

@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 static int ppp_dev_init(struct net_device *dev)
 {
+	struct ppp *ppp;
+
 	netdev_lockdep_set_classes(dev);
+
+	ppp = netdev_priv(dev);
+	/* Let the netdevice take a reference on the ppp file. This ensures
+	 * that ppp_destroy_interface() won't run before the device gets
+	 * unregistered.
+	 */
+	atomic_inc(&ppp->file.refcnt);
+
 	return 0;
 }
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
 	wake_up_interruptible(&ppp->file.rwait);
 }
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+	struct ppp *ppp;
+
+	ppp = netdev_priv(dev);
+	if (atomic_dec_and_test(&ppp->file.refcnt))
+		ppp_destroy_interface(ppp);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
 	.ndo_init	 = ppp_dev_init,
 	.ndo_uninit	 = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->priv_destructor = ppp_dev_priv_destructor;
 	netif_keep_dst(dev);
 }
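
Note: the PPP change has the netdevice take a reference on the ppp file in ndo_init and drop it in a new priv_destructor, so ppp_destroy_interface() cannot run while the device is still registered. A userspace model of this shared-lifetime pattern, with hypothetical names and C11 atomics standing in for atomic_t:

/*
 * Userspace model of the refcount pattern above; not kernel code.
 * The "device" takes a reference at init time, so the backing object
 * survives until both the file side and the device side have put
 * their references.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ppp_file { atomic_int refcnt; };

static void destroy_interface(struct ppp_file *pf)
{
    printf("freeing backing object\n");
    free(pf);
}

static void put_ref(struct ppp_file *pf)
{
    /* last put frees; mirrors atomic_dec_and_test() */
    if (atomic_fetch_sub(&pf->refcnt, 1) == 1)
        destroy_interface(pf);
}

int main(void)
{
    struct ppp_file *pf = malloc(sizeof(*pf));

    if (!pf)
        return 1;
    atomic_init(&pf->refcnt, 1);        /* file owner's reference */
    atomic_fetch_add(&pf->refcnt, 1);   /* device takes a ref (ndo_init) */

    put_ref(pf);    /* file released: object must survive */
    put_ref(pf);    /* priv_destructor: last ref, now freed */
    return 0;
}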

View file

@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
 #define MICROSOFT_VENDOR_ID	0x045e
+#define UBLOX_VENDOR_ID		0x1546
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -861,6 +862,18 @@ static const struct usb_device_id	products[] = {
 			USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&zte_cdc_info,
+}, {
+	/* U-blox TOBY-L2 */
+	USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET,
+			USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* U-blox SARA-U2 */
+	USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET,
+			USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
 }, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),

View file

@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
 {
 	if (!dn || dn != of_stdout || console_set_on_cmdline)
 		return false;
-	return !add_preferred_console(name, index,
-				      kstrdup(of_stdout_options, GFP_KERNEL));
+
+	/*
+	 * XXX: cast `options' to char pointer to suppress complication
+	 * warnings: printk, UART and console drivers expect char pointer.
+	 */
+	return !add_preferred_console(name, index, (char *)of_stdout_options);
 }
 EXPORT_SYMBOL_GPL(of_console_check);
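
Note: the of_console_check() fix stops kstrdup()-ing the options string on every call (the copy was never freed) and instead passes the existing long-lived string, casting away const to fit the legacy char * parameter. A small sketch of that trade-off, with a stand-in for the legacy API rather than the real console code:

/*
 * Sketch of the const-cast pattern; hypothetical stand-in API, not
 * kernel code. The callee stores the pointer but never writes through
 * it, so passing the long-lived const string avoids leaking a copy.
 */
#include <stdio.h>

static const char *saved_options;

/* legacy API: takes char * even though it only reads the options */
static int add_console(const char *name, int idx, char *options)
{
    (void)name; (void)idx;
    saved_options = options;    /* stored, never modified */
    return 0;
}

static const char *stdout_options = "115200n8";

int main(void)
{
    /* the cast silences the const warning; safe because the string
     * outlives the registration and is never written through */
    add_console("ttyS", 0, (char *)stdout_options);
    printf("options=%s\n", saved_options);
    return 0;
}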

View file

@@ -25,7 +25,7 @@
 #include <linux/sort.h>
 #include <linux/slab.h>
-#define MAX_RESERVED_REGIONS	16
+#define MAX_RESERVED_REGIONS	32
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
View file

@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
 	struct device_node *np;
 	/* Get the parent of the port */
-	np = of_get_next_parent(to_of_node(fwnode));
+	np = of_get_parent(to_of_node(fwnode));
 	if (!np)
 		return NULL;
Some files were not shown because too many files have changed in this diff