updating to mainline 4.14.3

Jake Day 2017-11-30 11:45:49 -05:00
parent dbd4bc1742
commit 7973e0981b
206 changed files with 2173 additions and 1251 deletions

View file

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 2
+SUBLEVEL = 3
 EXTRAVERSION =
 NAME = Petit Gorille

View file

@@ -129,8 +129,8 @@ static const struct prot_bits section_bits[] = {
 .val = PMD_SECT_USER,
 .set = "USR",
 }, {
-.mask = L_PMD_SECT_RDONLY,
-.val = L_PMD_SECT_RDONLY,
+.mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 .set = "ro",
 .clear = "RW",
 #elif __LINUX_ARM_ARCH__ >= 6

View file

@@ -639,8 +639,8 @@ static struct section_perm ro_perms[] = {
 .start = (unsigned long)_stext,
 .end = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
-.mask = ~L_PMD_SECT_RDONLY,
-.prot = L_PMD_SECT_RDONLY,
+.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
 #else
 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,

View file

@@ -49,6 +49,14 @@
 / {
 compatible = "amlogic,meson-gxl";
+reserved-memory {
+/* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+secmon_reserved_alt: secmon@05000000 {
+reg = <0x0 0x05000000 0x0 0x300000>;
+no-map;
+};
+};
 };
 &ethmac {

View file

@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
 ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 /*
  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_accessible(mm, pte) \
 (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only which do not have the
+ * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+(pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+(pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+(pte_access_permitted(pud_pte(pud), (write)))
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
 pte_val(pte) &= ~pgprot_val(prot);

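For readers skimming the pte_access_permitted() hunk above: the new p??_access_permitted() helpers are pure bit tests, and the pmd/pud variants simply funnel through the pte helper after converting the entry. A minimal stand-alone C sketch of the same predicate logic; the bit positions and names below are illustrative placeholders, not the kernel's actual PTE layout:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative bit positions only; the real layout is arch-specific. */
    #define PTE_VALID (1u << 0)
    #define PTE_USER  (1u << 6)
    #define PTE_WRITE (1u << 7)

    static bool pte_valid_user(uint64_t pte)
    {
        return (pte & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER);
    }

    /* Mirrors pte_access_permitted(): the mapping must be user-visible and,
     * for a write, writable. Execute-only mappings fail the PTE_USER test,
     * PROT_NONE mappings fail the PTE_VALID test. */
    static bool pte_access_permitted(uint64_t pte, bool write)
    {
        return pte_valid_user(pte) && (!write || (pte & PTE_WRITE));
    }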
View file

@@ -65,7 +65,7 @@ config MIPS
 select HAVE_PERF_EVENTS
 select HAVE_REGS_AND_STACK_ACCESS_API
 select HAVE_SYSCALL_TRACEPOINTS
-select HAVE_VIRT_CPU_ACCOUNTING_GEN
+select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
 select IRQ_FORCED_THREADING
 select MODULES_USE_ELF_RELA if MODULES && 64BIT
 select MODULES_USE_ELF_REL if MODULES

View file

@@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
 /* Verified on: WRT54GS V1.0 */
 static const struct gpio_led
 bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
-BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
 BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
 BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
 };

View file

@@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \
 bcm63268-comtrend-vr-3032u.dtb \
 bcm93384wvg.dtb \
 bcm93384wvg_viper.dtb \
-bcm96358nb4ser.dtb \
 bcm96368mvwg.dtb \
 bcm9ejtagprb.dtb \
 bcm97125cbmb.dtb \

View file

@@ -19,6 +19,9 @@
 #include <asm/asmmacro-64.h>
 #endif
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
 /*
  * Helper macros for generating raw instruction encodings.
  */
@@ -105,6 +108,7 @@
 .macro fpu_save_16odd thread
 .set push
 .set mips64r2
+.set fp=64
 SET_HARDFLOAT
 sdc1 $f1, THREAD_FPR1(\thread)
 sdc1 $f3, THREAD_FPR3(\thread)
@@ -126,8 +130,8 @@
 .endm
 .macro fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 sll \tmp, \status, 5
 bgez \tmp, 10f
 fpu_save_16odd \thread
@@ -163,6 +167,7 @@
 .macro fpu_restore_16odd thread
 .set push
 .set mips64r2
+.set fp=64
 SET_HARDFLOAT
 ldc1 $f1, THREAD_FPR1(\thread)
 ldc1 $f3, THREAD_FPR3(\thread)
@@ -184,8 +189,8 @@
 .endm
 .macro fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 sll \tmp, \status, 5
 bgez \tmp, 10f # 16 register mode?
@@ -234,9 +239,6 @@
 .endm
 #ifdef TOOLCHAIN_SUPPORTS_MSA
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
 .macro _cfcmsa rd, cs
 .set push
 .set mips32r2

View file

@@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #else
 #include <asm-generic/cmpxchg-local.h>
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef CONFIG_SMP
 #define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
 #endif
+#endif
 #undef __scbeqz

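The cmpxchg64() hunk above only keeps the cmpxchg64_local() fallback when CONFIG_SMP is off, because the generic local variant is a plain read-modify-write protected only against the current CPU. A hedged, simplified sketch of that style of emulation (not the asm-generic code verbatim) showing why it is UP-only:

    #include <stdint.h>

    /* Sketch of a "local" 64-bit cmpxchg: imagine local_irq_save() /
     * local_irq_restore() around the body. Only the current CPU is kept
     * out of the critical section; on an SMP system another CPU can
     * interleave between the load and the store, so this is only a valid
     * cmpxchg64() when SMP is disabled. */
    static uint64_t cmpxchg64_local_sketch(volatile uint64_t *ptr,
                                           uint64_t old, uint64_t new)
    {
        uint64_t cur = *ptr;
        if (cur == old)
            *ptr = new;
        return cur;
    }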
View file

@@ -618,6 +618,19 @@ static const struct user_regset_view user_mips64_view = {
 .n = ARRAY_SIZE(mips64_regsets),
 };
+#ifdef CONFIG_MIPS32_N32
+static const struct user_regset_view user_mipsn32_view = {
+.name = "mipsn32",
+.e_flags = EF_MIPS_ABI2,
+.e_machine = ELF_ARCH,
+.ei_osabi = ELF_OSABI,
+.regsets = mips64_regsets,
+.n = ARRAY_SIZE(mips64_regsets),
+};
+#endif /* CONFIG_MIPS32_N32 */
 #endif /* CONFIG_64BIT */
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -628,6 +641,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 #ifdef CONFIG_MIPS32_O32
 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 return &user_mips_view;
+#endif
+#ifdef CONFIG_MIPS32_N32
+if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+return &user_mipsn32_view;
 #endif
 return &user_mips64_view;
 #endif

View file

@@ -40,8 +40,8 @@
  */
 LEAF(_save_fp)
 EXPORT_SYMBOL(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 mfc0 t0, CP0_STATUS
 #endif
 fpu_save_double a0 t0 t1 # clobbers t1
@@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp)
 * Restore a thread's fp context.
 */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 mfc0 t0, CP0_STATUS
 #endif
 fpu_restore_double a0 t0 t1 # clobbers t1
@@ -246,11 +246,11 @@ LEAF(_save_fp_context)
 cfc1 t1, fcr31
 .set pop
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 .set push
 SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
 .set mips32r2
 .set fp=64
 mfc0 t0, CP0_STATUS
@@ -314,11 +314,11 @@ LEAF(_save_fp_context)
 LEAF(_restore_fp_context)
 EX lw t1, 0(a1)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
-defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+defined(CONFIG_CPU_MIPSR6)
 .set push
 SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
 .set mips32r2
 .set fp=64
 mfc0 t0, CP0_STATUS

View file

@@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(fs, MIPSInst_FS(ir));
 SPFROMREG(fd, MIPSInst_FD(ir));
 rv.s = ieee754sp_maddf(fd, fs, ft);
-break;
+goto copcsr;
 }
 case fmsubf_op: {
@@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(fs, MIPSInst_FS(ir));
 SPFROMREG(fd, MIPSInst_FD(ir));
 rv.s = ieee754sp_msubf(fd, fs, ft);
-break;
+goto copcsr;
 }
 case frint_op: {
@@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(fs, MIPSInst_FS(ir));
 rv.w = ieee754sp_2008class(fs);
 rfmt = w_fmt;
-break;
+goto copcsr;
 }
 case fmin_op: {
@@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(ft, MIPSInst_FT(ir));
 SPFROMREG(fs, MIPSInst_FS(ir));
 rv.s = ieee754sp_fmin(fs, ft);
-break;
+goto copcsr;
 }
 case fmina_op: {
@@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(ft, MIPSInst_FT(ir));
 SPFROMREG(fs, MIPSInst_FS(ir));
 rv.s = ieee754sp_fmina(fs, ft);
-break;
+goto copcsr;
 }
 case fmax_op: {
@@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(ft, MIPSInst_FT(ir));
 SPFROMREG(fs, MIPSInst_FS(ir));
 rv.s = ieee754sp_fmax(fs, ft);
-break;
+goto copcsr;
 }
 case fmaxa_op: {
@@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 SPFROMREG(ft, MIPSInst_FT(ir));
 SPFROMREG(fs, MIPSInst_FS(ir));
 rv.s = ieee754sp_fmaxa(fs, ft);
-break;
+goto copcsr;
 }
 case fabs_op:
@@ -2165,7 +2165,7 @@ copcsr:
 DPFROMREG(fs, MIPSInst_FS(ir));
 DPFROMREG(fd, MIPSInst_FD(ir));
 rv.d = ieee754dp_maddf(fd, fs, ft);
-break;
+goto copcsr;
 }
 case fmsubf_op: {
@@ -2179,7 +2179,7 @@ copcsr:
 DPFROMREG(fs, MIPSInst_FS(ir));
 DPFROMREG(fd, MIPSInst_FD(ir));
 rv.d = ieee754dp_msubf(fd, fs, ft);
-break;
+goto copcsr;
 }
 case frint_op: {
@@ -2204,7 +2204,7 @@ copcsr:
 DPFROMREG(fs, MIPSInst_FS(ir));
 rv.l = ieee754dp_2008class(fs);
 rfmt = l_fmt;
-break;
+goto copcsr;
 }
 case fmin_op: {
@@ -2217,7 +2217,7 @@ copcsr:
 DPFROMREG(ft, MIPSInst_FT(ir));
 DPFROMREG(fs, MIPSInst_FS(ir));
 rv.d = ieee754dp_fmin(fs, ft);
-break;
+goto copcsr;
 }
 case fmina_op: {
@@ -2230,7 +2230,7 @@ copcsr:
 DPFROMREG(ft, MIPSInst_FT(ir));
 DPFROMREG(fs, MIPSInst_FS(ir));
 rv.d = ieee754dp_fmina(fs, ft);
-break;
+goto copcsr;
 }
 case fmax_op: {
@@ -2243,7 +2243,7 @@ copcsr:
 DPFROMREG(ft, MIPSInst_FT(ir));
 DPFROMREG(fs, MIPSInst_FS(ir));
 rv.d = ieee754dp_fmax(fs, ft);
-break;
+goto copcsr;
 }
 case fmaxa_op: {
@@ -2256,7 +2256,7 @@ copcsr:
 DPFROMREG(ft, MIPSInst_FT(ir));
 DPFROMREG(fs, MIPSInst_FS(ir));
 rv.d = ieee754dp_fmaxa(fs, ft);
-break;
+goto copcsr;
 }
 case fabs_op:

View file

@@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
 else
 break;
 if (retry++ > WAITRETRY_MAX) {
-printk(KERN_WARN "PCIE-PHY retry failed.\n");
+pr_warn("PCIE-PHY retry failed.\n");
 return -1;
 }
 }

View file

@@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
 FUNC("i2c", 0, 4, 2),
 };
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
 static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };

View file

@@ -690,15 +690,15 @@ cas_action:
 /* ELF32 Process entry path */
 lws_compare_and_swap_2:
 #ifdef CONFIG_64BIT
-/* Clip the input registers */
+/* Clip the input registers. We don't need to clip %r23 as we
+   only use it for word operations */
 depdi 0, 31, 32, %r26
 depdi 0, 31, 32, %r25
 depdi 0, 31, 32, %r24
-depdi 0, 31, 32, %r23
 #endif
 /* Check the validity of the size pointer */
-subi,>>= 4, %r23, %r0
+subi,>>= 3, %r23, %r0
 b,n lws_exit_nosys
 /* Jump to the functions which will load the old and new values into

View file

@@ -542,7 +542,7 @@ EXC_COMMON_BEGIN(instruction_access_common)
 RECONCILE_IRQ_STATE(r10, r11)
 ld r12,_MSR(r1)
 ld r3,_NIP(r1)
-andis. r4,r12,DSISR_BAD_FAULT_64S@h
+andis. r4,r12,DSISR_SRR1_MATCH_64S@h
 li r5,0x400
 std r3,_DAR(r1)
 std r4,_DSISR(r1)

View file

@@ -103,7 +103,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
 static void do_signal(struct task_struct *tsk)
 {
 sigset_t *oldset = sigmask_to_save();
-struct ksignal ksig;
+struct ksignal ksig = { .sig = 0 };
 int ret;
 int is32 = is_32bit_task();

View file

@@ -529,6 +529,8 @@ static inline bool is_rm(void)
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 if (xive_enabled()) {
 if (is_rm())
 return xive_rm_h_xirr(vcpu);
@@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 vcpu->arch.gpr[5] = get_tb();
 if (xive_enabled()) {
 if (is_rm())
@@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 if (xive_enabled()) {
 if (is_rm())
 return xive_rm_h_ipoll(vcpu, server);
@@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 unsigned long mfrr)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 if (xive_enabled()) {
 if (is_rm())
 return xive_rm_h_ipi(vcpu, server, mfrr);
@@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 if (xive_enabled()) {
 if (is_rm())
 return xive_rm_h_cppr(vcpu, cppr);
@@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
+if (!kvmppc_xics_enabled(vcpu))
+return H_TOO_HARD;
 if (xive_enabled()) {
 if (is_rm())
 return xive_rm_h_eoi(vcpu, xirr);

View file

@@ -21,6 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/code-patching.h>
+#include <asm/setup.h>
 static int __patch_instruction(unsigned int *addr, unsigned int instr)
 {
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 * During early early boot patch_instruction is called
 * when text_poke_area is not ready, but we still need
 * to allow patching. We just do the plain old patching
-* We use slab_is_available and per cpu read
-* of text_poke_area. Per-CPU areas might not be up early
-* this can create problems with just using this_cpu_read()
+* via this_cpu_read
 */
-if (!slab_is_available() || !this_cpu_read(text_poke_area))
+if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
 return __patch_instruction(addr, instr);
 local_irq_save(flags);

View file

@@ -49,17 +49,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 struct mm_struct *mm = current->mm;
 struct vm_area_struct *vma;
 struct hstate *h = hstate_file(file);
+int fixed = (flags & MAP_FIXED);
+unsigned long high_limit;
 struct vm_unmapped_area_info info;
-if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
-mm->context.addr_limit = TASK_SIZE;
+high_limit = DEFAULT_MAP_WINDOW;
+if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+high_limit = TASK_SIZE;
 if (len & ~huge_page_mask(h))
 return -EINVAL;
-if (len > mm->task_size)
+if (len > high_limit)
 return -ENOMEM;
+if (fixed) {
+if (addr > high_limit - len)
+return -ENOMEM;
+}
-if (flags & MAP_FIXED) {
+if (unlikely(addr > mm->context.addr_limit &&
+mm->context.addr_limit != TASK_SIZE))
+mm->context.addr_limit = TASK_SIZE;
+if (fixed) {
 if (prepare_hugepage_range(file, addr, len))
 return -EINVAL;
 return addr;
@@ -68,7 +79,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 if (addr) {
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
-if (mm->task_size - len >= addr &&
+if (high_limit - len >= addr &&
 (!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -79,12 +90,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 info.length = len;
 info.low_limit = PAGE_SIZE;
-info.high_limit = current->mm->mmap_base;
+info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 info.align_offset = 0;
-if (addr > DEFAULT_MAP_WINDOW)
-info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
 return vm_unmapped_area(&info);
 }

View file

@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 struct mm_struct *mm = current->mm;
 struct vm_area_struct *vma;
+int fixed = (flags & MAP_FIXED);
+unsigned long high_limit;
 struct vm_unmapped_area_info info;
+high_limit = DEFAULT_MAP_WINDOW;
+if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+high_limit = TASK_SIZE;
+if (len > high_limit)
+return -ENOMEM;
+if (fixed) {
+if (addr > high_limit - len)
+return -ENOMEM;
+}
 if (unlikely(addr > mm->context.addr_limit &&
 mm->context.addr_limit != TASK_SIZE))
 mm->context.addr_limit = TASK_SIZE;
-if (len > mm->task_size - mmap_min_addr)
-return -ENOMEM;
-if (flags & MAP_FIXED)
+if (fixed)
 return addr;
 if (addr) {
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
-if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+if (high_limit - len >= addr && addr >= mmap_min_addr &&
 (!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 info.flags = 0;
 info.length = len;
 info.low_limit = mm->mmap_base;
+info.high_limit = high_limit;
 info.align_mask = 0;
-if (unlikely(addr > DEFAULT_MAP_WINDOW))
-info.high_limit = mm->context.addr_limit;
-else
-info.high_limit = DEFAULT_MAP_WINDOW;
 return vm_unmapped_area(&info);
 }
@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 struct vm_area_struct *vma;
 struct mm_struct *mm = current->mm;
 unsigned long addr = addr0;
+int fixed = (flags & MAP_FIXED);
+unsigned long high_limit;
 struct vm_unmapped_area_info info;
+high_limit = DEFAULT_MAP_WINDOW;
+if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+high_limit = TASK_SIZE;
+if (len > high_limit)
+return -ENOMEM;
+if (fixed) {
+if (addr > high_limit - len)
+return -ENOMEM;
+}
 if (unlikely(addr > mm->context.addr_limit &&
 mm->context.addr_limit != TASK_SIZE))
 mm->context.addr_limit = TASK_SIZE;
-/* requested length too big for entire address space */
-if (len > mm->task_size - mmap_min_addr)
-return -ENOMEM;
-if (flags & MAP_FIXED)
+if (fixed)
 return addr;
-/* requesting a specific address */
 if (addr) {
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
-if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+if (high_limit - len >= addr && addr >= mmap_min_addr &&
 (!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 info.length = len;
 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-info.high_limit = mm->mmap_base;
+info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
 info.align_mask = 0;
-if (addr > DEFAULT_MAP_WINDOW)
-info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
 addr = vm_unmapped_area(&info);
 if (!(addr & ~PAGE_MASK))
 return addr;

View file

@@ -93,11 +93,11 @@ static int hash__init_new_context(struct mm_struct *mm)
 return index;
 /*
-* We do switch_slb() early in fork, even before we setup the
-* mm->context.addr_limit. Default to max task size so that we copy the
-* default values to paca which will help us to handle slb miss early.
+* In the case of exec, use the default limit,
+* otherwise inherit it from the mm we are duplicating.
 */
-mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+if (!mm->context.addr_limit)
+mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 /*
 * The old code would re-promote on fork, we don't do that when using

View file

@@ -169,6 +169,16 @@ void radix__mark_rodata_ro(void)
 {
 unsigned long start, end;
+/*
+ * mark_rodata_ro() will mark itself as !writable at some point.
+ * Due to DD1 workaround in radix__pte_update(), we'll end up with
+ * an invalid pte and the system will crash quite severly.
+ */
+if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
+return;
+}
 start = (unsigned long)_stext;
 end = (unsigned long)__init_begin;

View file

@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
 struct vm_area_struct *vma;
-if ((mm->task_size - len) < addr)
+if ((mm->context.addr_limit - len) < addr)
 return 0;
 vma = find_vma(mm, addr);
 return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,7 +133,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 if (!slice_low_has_vma(mm, i))
 ret->low_slices |= 1u << i;
-if (mm->task_size <= SLICE_LOW_TOP)
+if (mm->context.addr_limit <= SLICE_LOW_TOP)
 return;
 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
@@ -412,25 +412,31 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 struct slice_mask compat_mask;
 int fixed = (flags & MAP_FIXED);
 int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+unsigned long page_size = 1UL << pshift;
 struct mm_struct *mm = current->mm;
 unsigned long newaddr;
 unsigned long high_limit;
-/*
- * Check if we need to expland slice area.
- */
-if (unlikely(addr > mm->context.addr_limit &&
-mm->context.addr_limit != TASK_SIZE)) {
-mm->context.addr_limit = TASK_SIZE;
+high_limit = DEFAULT_MAP_WINDOW;
+if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+high_limit = TASK_SIZE;
+if (len > high_limit)
+return -ENOMEM;
+if (len & (page_size - 1))
+return -EINVAL;
+if (fixed) {
+if (addr & (page_size - 1))
+return -EINVAL;
+if (addr > high_limit - len)
+return -ENOMEM;
+}
+if (high_limit > mm->context.addr_limit) {
+mm->context.addr_limit = high_limit;
 on_each_cpu(slice_flush_segments, mm, 1);
 }
-/*
- * This mmap request can allocate upt to 512TB
- */
-if (addr > DEFAULT_MAP_WINDOW)
-high_limit = mm->context.addr_limit;
-else
-high_limit = DEFAULT_MAP_WINDOW;
 /*
 * init different masks
 */
@@ -446,27 +452,19 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 /* Sanity checks */
 BUG_ON(mm->task_size == 0);
+BUG_ON(mm->context.addr_limit == 0);
 VM_BUG_ON(radix_enabled());
 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
 slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
 addr, len, flags, topdown);
-if (len > mm->task_size)
-return -ENOMEM;
-if (len & ((1ul << pshift) - 1))
-return -EINVAL;
-if (fixed && (addr & ((1ul << pshift) - 1)))
-return -EINVAL;
-if (fixed && addr > (mm->task_size - len))
-return -ENOMEM;
 /* If hint, make sure it matches our alignment restrictions */
 if (!fixed && addr) {
-addr = _ALIGN_UP(addr, 1ul << pshift);
+addr = _ALIGN_UP(addr, page_size);
 slice_dbg(" aligned addr=%lx\n", addr);
 /* Ignore hint if it's too large or overlaps a VMA */
-if (addr > mm->task_size - len ||
+if (addr > high_limit - len ||
 !slice_area_is_free(mm, addr, len))
 addr = 0;
 }

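The powerpc hunks above (radix__hugetlb_get_unmapped_area, radix__arch_get_unmapped_area/_topdown and slice_get_unmapped_area) all apply the same rule: stay below the default map window unless the caller's hint or MAP_FIXED request explicitly reaches above it, and only then widen to the full task size. A stand-alone C sketch of just that clipping step; the window sizes here are illustrative, not taken from this diff:

    #include <stdbool.h>

    #define DEFAULT_MAP_WINDOW (128UL << 40)   /* illustrative: 128 TB */
    #define TASK_SIZE_FULL     (512UL << 40)   /* illustrative: 512 TB */

    /* Mirrors the high_limit selection added in the hunks above: only open
     * up the full address space when the request itself crosses the
     * default window. */
    static unsigned long pick_high_limit(unsigned long addr,
                                         unsigned long len, bool fixed)
    {
        unsigned long high_limit = DEFAULT_MAP_WINDOW;

        if (addr >= high_limit || (fixed && addr + len > high_limit))
            high_limit = TASK_SIZE_FULL;
        return high_limit;
    }

Each caller then validates against that limit (len > high_limit fails with -ENOMEM, and a fixed mapping must fit entirely below it), which is the pattern repeated across the three functions.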
View file

@@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event)
 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
 * Get the base memory addresss for this cpu.
 */
-chip_id = topology_physical_package_id(event->cpu);
+chip_id = cpu_to_chip_id(event->cpu);
 pcni = pmu->mem_info;
 do {
 if (pcni->id == chip_id) {
@@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event)
 */
 static int core_imc_mem_init(int cpu, int size)
 {
-int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+int nid, rc = 0, core_id = (cpu / threads_per_core);
 struct imc_mem_info *mem_info;
 /*
 * alloc_pages_node() will allocate memory for core in the
 * local node only.
 */
-phys_id = topology_physical_package_id(cpu);
+nid = cpu_to_node(cpu);
 mem_info = &core_imc_pmu->mem_info[core_id];
 mem_info->id = core_id;
 /* We need only vbase for core counters */
-mem_info->vbase = page_address(alloc_pages_node(phys_id,
+mem_info->vbase = page_address(alloc_pages_node(nid,
 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 __GFP_NOWARN, get_order(size)));
 if (!mem_info->vbase)
@@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event)
 static int thread_imc_mem_alloc(int cpu_id, int size)
 {
 u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
-int phys_id = topology_physical_package_id(cpu_id);
+int nid = cpu_to_node(cpu_id);
 if (!local_mem) {
 /*
 * This case could happen only once at start, since we dont
 * free the memory in cpu offline path.
 */
-local_mem = page_address(alloc_pages_node(phys_id,
+local_mem = page_address(alloc_pages_node(nid,
 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 __GFP_NOWARN, get_order(size)));
 if (!local_mem)

View file

@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
 save_ri_cb(prev->thread.ri_cb); \
 save_gs_cb(prev->thread.gs_cb); \
 } \
-update_cr_regs(next); \
 if (next->mm) { \
+update_cr_regs(next); \
 set_cpu_flag(CIF_FPU); \
 restore_access_regs(&next->thread.acrs[0]); \
 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \

View file

@@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
 { "vfsq", 0xce, INSTR_VRR_VV000MM },
 { "vfs", 0xe2, INSTR_VRR_VVV00MM },
 { "vftci", 0x4a, INSTR_VRI_VVIMM },
+{ "", 0, INSTR_INVALID }
 };
 static struct s390_insn opcode_eb[] = {
@@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
 {
 char *mode = user_mode(regs) ? "User" : "Krnl";
 unsigned char code[64];
-char buffer[64], *ptr;
+char buffer[128], *ptr;
 mm_segment_t old_fs;
 unsigned long addr;
 int start, end, opsize, hops, i;
@@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
 start += opsize;
 pr_cont("%s", buffer);
 ptr = buffer;
-ptr += sprintf(ptr, "\n ");
+ptr += sprintf(ptr, "\n\t ");
 hops++;
 }
 pr_cont("\n");
pr_cont("\n"); pr_cont("\n");

View file

@@ -375,8 +375,10 @@ static __init void detect_machine_facilities(void)
 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
 if (test_facility(40))
 S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
-if (test_facility(50) && test_facility(73))
+if (test_facility(50) && test_facility(73)) {
 S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+__ctl_set_bit(0, 55);
+}
 if (test_facility(51))
 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 if (test_facility(129)) {

View file

@@ -14,9 +14,11 @@
 void exit_thread_gs(void)
 {
+preempt_disable();
 kfree(current->thread.gs_cb);
 kfree(current->thread.gs_bc_cb);
 current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
+preempt_enable();
 }
 static int gs_enable(void)

View file

@@ -269,6 +269,7 @@ static void __do_machine_kexec(void *data)
 s390_reset_system();
 data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
+__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
 /* Call the moving routine */
 (*data_mover)(&image->head, image->start);

View file

@@ -100,6 +100,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+p->thread.per_flags = 0;
 /* Initialize per thread user and system timer values */
 p->thread.user_timer = 0;
 p->thread.guest_timer = 0;

View file

@@ -29,7 +29,6 @@
 ENTRY(relocate_kernel)
 basr %r13,0 # base address
 .base:
-stnsm sys_msk-.base(%r13),0xfb # disable DAT
 stctg %c0,%c15,ctlregs-.base(%r13)
 stmg %r0,%r15,gprregs-.base(%r13)
 lghi %r0,3
@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
 .align 8
 load_psw:
 .long 0x00080000,0x80000000
-sys_msk:
-.quad 0
 ctlregs:
 .rept 16
 .quad 0

View file

@@ -50,11 +50,13 @@ void exit_thread_runtime_instr(void)
 {
 struct task_struct *task = current;
+preempt_disable();
 if (!task->thread.ri_cb)
 return;
 disable_runtime_instr();
 kfree(task->thread.ri_cb);
 task->thread.ri_cb = NULL;
+preempt_enable();
 }
 SYSCALL_DEFINE1(s390_runtime_instr, int, command)
@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
 return -EOPNOTSUPP;
 if (command == S390_RUNTIME_INSTR_STOP) {
-preempt_disable();
 exit_thread_runtime_instr();
-preempt_enable();
 return 0;
 }

View file

@@ -51,15 +51,19 @@ ENTRY(native_usergs_sysret64)
 END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
 #ifdef CONFIG_TRACE_IRQFLAGS
-bt $9, EFLAGS(%rsp) /* interrupts off? */
+bt $9, \flags /* interrupts off? */
 jnc 1f
 TRACE_IRQS_ON
 1:
 #endif
 .endm
+.macro TRACE_IRQS_IRETQ
+TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
 /*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
@@ -148,8 +152,6 @@ ENTRY(entry_SYSCALL_64)
 movq %rsp, PER_CPU_VAR(rsp_scratch)
 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-TRACE_IRQS_OFF
 /* Construct struct pt_regs on stack */
 pushq $__USER_DS /* pt_regs->ss */
 pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
@@ -170,6 +172,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
 UNWIND_HINT_REGS extra=0
+TRACE_IRQS_OFF
 /*
 * If we need to do entry work or if we guess we'll need to do
 * exit work, go straight to the slow path.
@@ -923,11 +927,13 @@ ENTRY(native_load_gs_index)
 FRAME_BEGIN
 pushfq
 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+TRACE_IRQS_OFF
 SWAPGS
 .Lgs_change:
 movl %edi, %gs
 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 SWAPGS
+TRACE_IRQS_FLAGS (%rsp)
 popfq
 FRAME_END
 ret

View file

@@ -3730,6 +3730,19 @@ EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
 EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
 static struct attribute *hsw_events_attrs[] = {
+EVENT_PTR(mem_ld_hsw),
+EVENT_PTR(mem_st_hsw),
+EVENT_PTR(td_slots_issued),
+EVENT_PTR(td_slots_retired),
+EVENT_PTR(td_fetch_bubbles),
+EVENT_PTR(td_total_slots),
+EVENT_PTR(td_total_slots_scale),
+EVENT_PTR(td_recovery_bubbles),
+EVENT_PTR(td_recovery_bubbles_scale),
+NULL
+};
+static struct attribute *hsw_tsx_events_attrs[] = {
 EVENT_PTR(tx_start),
 EVENT_PTR(tx_commit),
 EVENT_PTR(tx_abort),
@@ -3742,18 +3755,16 @@ static struct attribute *hsw_events_attrs[] = {
 EVENT_PTR(el_conflict),
 EVENT_PTR(cycles_t),
 EVENT_PTR(cycles_ct),
-EVENT_PTR(mem_ld_hsw),
-EVENT_PTR(mem_st_hsw),
-EVENT_PTR(td_slots_issued),
-EVENT_PTR(td_slots_retired),
-EVENT_PTR(td_fetch_bubbles),
-EVENT_PTR(td_total_slots),
-EVENT_PTR(td_total_slots_scale),
-EVENT_PTR(td_recovery_bubbles),
-EVENT_PTR(td_recovery_bubbles_scale),
 NULL
 };
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+return boot_cpu_has(X86_FEATURE_RTM) ?
+merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+hsw_events_attrs;
+}
 static ssize_t freeze_on_smi_show(struct device *cdev,
 struct device_attribute *attr,
 char *buf)
@@ -4182,7 +4193,7 @@ __init int intel_pmu_init(void)
 x86_pmu.hw_config = hsw_hw_config;
 x86_pmu.get_event_constraints = hsw_get_event_constraints;
-x86_pmu.cpu_events = hsw_events_attrs;
+x86_pmu.cpu_events = get_hsw_events_attrs();
 x86_pmu.lbr_double_abort = true;
 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 hsw_format_attr : nhm_format_attr;
@@ -4221,7 +4232,7 @@ __init int intel_pmu_init(void)
 x86_pmu.hw_config = hsw_hw_config;
 x86_pmu.get_event_constraints = hsw_get_event_constraints;
-x86_pmu.cpu_events = hsw_events_attrs;
+x86_pmu.cpu_events = get_hsw_events_attrs();
 x86_pmu.limit_period = bdw_limit_period;
 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 hsw_format_attr : nhm_format_attr;
@@ -4279,7 +4290,7 @@ __init int intel_pmu_init(void)
 extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 hsw_format_attr : nhm_format_attr;
 extra_attr = merge_attr(extra_attr, skl_format_attr);
-x86_pmu.cpu_events = hsw_events_attrs;
+x86_pmu.cpu_events = get_hsw_events_attrs();
 intel_pmu_pebs_data_source_skl(
 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
 pr_cont("Skylake events, ");

View file

@@ -431,6 +431,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
 }
 static unsigned long mpf_base;
+static bool mpf_found;
 static unsigned long __init get_mpc_size(unsigned long physptr)
 {
@@ -504,7 +505,7 @@ void __init default_get_smp_config(unsigned int early)
 if (!smp_found_config)
 return;
-if (!mpf_base)
+if (!mpf_found)
 return;
 if (acpi_lapic && early)
@@ -593,6 +594,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
 smp_found_config = 1;
 #endif
 mpf_base = base;
+mpf_found = true;
 pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
 base, base + sizeof(*mpf) - 1, mpf);
@@ -858,7 +860,7 @@ static int __init update_mp_table(void)
 if (!enable_update_mptable)
 return 0;
-if (!mpf_base)
+if (!mpf_found)
 return 0;
 mpf = early_memremap(mpf_base, sizeof(*mpf));

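The MP floating pointer hunks above stop treating mpf_base == 0 as "no MP table found", since physical address 0 is a legitimate place for the structure to live; an explicit boolean now records whether the scan succeeded. A tiny self-contained illustration of the sentinel-versus-flag distinction; the names here are made up for the example:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long mpf_base;   /* may legitimately be 0 */
    static bool mpf_found;           /* explicit success marker */

    static void record_scan_hit(unsigned long base)
    {
        mpf_base = base;     /* 0 is a valid physical address... */
        mpf_found = true;    /* ...so success is tracked separately */
    }

    int main(void)
    {
        record_scan_hit(0x0);
        /* Testing mpf_base alone would wrongly report "not found" here. */
        printf("found=%d base=%#lx\n", mpf_found, mpf_base);
        return 0;
    }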
View file

@@ -3657,6 +3657,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 u32 ecx = msr->index;
 u64 data = msr->data;
 switch (ecx) {
+case MSR_IA32_CR_PAT:
+if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+return 1;
+vcpu->arch.pat = data;
+svm->vmcb->save.g_pat = data;
+mark_dirty(svm->vmcb, VMCB_NPT);
+break;
 case MSR_IA32_TSC:
 kvm_write_tsc(vcpu, msr);
 break;

View file

@ -202,6 +202,10 @@ struct loaded_vmcs {
bool nmi_known_unmasked; bool nmi_known_unmasked;
unsigned long vmcs_host_cr3; /* May not match real cr3 */ unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */ unsigned long vmcs_host_cr4; /* May not match real cr4 */
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
s64 vnmi_blocked_time;
struct list_head loaded_vmcss_on_cpu_link; struct list_head loaded_vmcss_on_cpu_link;
}; };
@ -1286,6 +1290,11 @@ static inline bool cpu_has_vmx_invpcid(void)
SECONDARY_EXEC_ENABLE_INVPCID; SECONDARY_EXEC_ENABLE_INVPCID;
} }
static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
static inline bool cpu_has_vmx_wbinvd_exit(void) static inline bool cpu_has_vmx_wbinvd_exit(void)
{ {
return vmcs_config.cpu_based_2nd_exec_ctrl & return vmcs_config.cpu_based_2nd_exec_ctrl &
@ -1343,11 +1352,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
(vmcs12->secondary_vm_exec_control & bit); (vmcs12->secondary_vm_exec_control & bit);
} }
static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12) static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{ {
return vmcs12->pin_based_vm_exec_control & return vmcs12->pin_based_vm_exec_control &
@ -3699,9 +3703,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
&_vmexit_control) < 0) &_vmexit_control) < 0)
return -EIO; return -EIO;
min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
PIN_BASED_VIRTUAL_NMIS; opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER; PIN_BASED_VMX_PREEMPTION_TIMER;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0) &_pin_based_exec_control) < 0)
return -EIO; return -EIO;
@ -5667,7 +5671,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
static void enable_nmi_window(struct kvm_vcpu *vcpu) static void enable_nmi_window(struct kvm_vcpu *vcpu)
{ {
if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) { if (!cpu_has_virtual_nmis() ||
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu); enable_irq_window(vcpu);
return; return;
} }
@ -5707,6 +5712,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!cpu_has_virtual_nmis()) {
/*
* Tracking the NMI-blocked state in software is built upon
* finding the next open IRQ window. This, in turn, depends on
* well-behaving guests: They have to keep IRQs disabled at
* least as long as the NMI handler runs. Otherwise we may
* cause NMI nesting, maybe breaking the guest. But as this is
* highly unlikely, we can live with the residual risk.
*/
vmx->loaded_vmcs->soft_vnmi_blocked = 1;
vmx->loaded_vmcs->vnmi_blocked_time = 0;
}
++vcpu->stat.nmi_injections; ++vcpu->stat.nmi_injections;
vmx->loaded_vmcs->nmi_known_unmasked = false; vmx->loaded_vmcs->nmi_known_unmasked = false;
@ -5725,6 +5743,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
bool masked; bool masked;
if (!cpu_has_virtual_nmis())
return vmx->loaded_vmcs->soft_vnmi_blocked;
if (vmx->loaded_vmcs->nmi_known_unmasked) if (vmx->loaded_vmcs->nmi_known_unmasked)
return false; return false;
masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@ -5736,13 +5756,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx->loaded_vmcs->nmi_known_unmasked = !masked; if (!cpu_has_virtual_nmis()) {
if (masked) if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, vmx->loaded_vmcs->soft_vnmi_blocked = masked;
GUEST_INTR_STATE_NMI); vmx->loaded_vmcs->vnmi_blocked_time = 0;
else }
vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, } else {
GUEST_INTR_STATE_NMI); vmx->loaded_vmcs->nmi_known_unmasked = !masked;
if (masked)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
else
vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
}
} }
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@ -5750,6 +5777,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
if (to_vmx(vcpu)->nested.nested_run_pending) if (to_vmx(vcpu)->nested.nested_run_pending)
return 0; return 0;
if (!cpu_has_virtual_nmis() &&
to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
return 0;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI)); | GUEST_INTR_STATE_NMI));
@ -6478,6 +6509,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
* AAK134, BY25. * AAK134, BY25.
*/ */
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI)) (exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI); vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
@ -6961,7 +6993,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
} }
/* Create a new VMCS */ /* Create a new VMCS */
item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL); item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
if (!item) if (!item)
return NULL; return NULL;
item->vmcs02.vmcs = alloc_vmcs(); item->vmcs02.vmcs = alloc_vmcs();
@ -7978,6 +8010,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
* "blocked by NMI" bit has to be set before next VM entry. * "blocked by NMI" bit has to be set before next VM entry.
*/ */
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI)) (exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI); GUEST_INTR_STATE_NMI);
@ -8822,6 +8855,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
return 0; return 0;
} }
if (unlikely(!cpu_has_virtual_nmis() &&
vmx->loaded_vmcs->soft_vnmi_blocked)) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->loaded_vmcs->soft_vnmi_blocked = 0;
} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
vcpu->arch.nmi_pending) {
/*
* This CPU don't support us in finding the end of an
* NMI-blocked window if the guest runs with IRQs
* disabled. So we pull the trigger after 1 s of
* futile waiting, but inform the user about this.
*/
printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
"state on VCPU %d after 1 s timeout\n",
__func__, vcpu->vcpu_id);
vmx->loaded_vmcs->soft_vnmi_blocked = 0;
}
}
if (exit_reason < kvm_vmx_max_exit_handlers if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason]) && kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu); return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -9104,33 +9156,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
    idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;

-   if (vmx->loaded_vmcs->nmi_known_unmasked)
-       return;
-   /*
-    * Can't use vmx->exit_intr_info since we're not sure what
-    * the exit reason is.
-    */
-   exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-   unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-   vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-   /*
-    * SDM 3: 27.7.1.2 (September 2008)
-    * Re-set bit "block by NMI" before VM entry if vmexit caused by
-    * a guest IRET fault.
-    * SDM 3: 23.2.2 (September 2008)
-    * Bit 12 is undefined in any of the following cases:
-    *  If the VM exit sets the valid bit in the IDT-vectoring
-    *   information field.
-    *  If the VM exit is due to a double fault.
-    */
-   if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-       vector != DF_VECTOR && !idtv_info_valid)
-       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                     GUEST_INTR_STATE_NMI);
-   else
-       vmx->loaded_vmcs->nmi_known_unmasked =
-           !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-             & GUEST_INTR_STATE_NMI);
+   if (cpu_has_virtual_nmis()) {
+       if (vmx->loaded_vmcs->nmi_known_unmasked)
+           return;
+       /*
+        * Can't use vmx->exit_intr_info since we're not sure what
+        * the exit reason is.
+        */
+       exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+       unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+       vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+       /*
+        * SDM 3: 27.7.1.2 (September 2008)
+        * Re-set bit "block by NMI" before VM entry if vmexit caused by
+        * a guest IRET fault.
+        * SDM 3: 23.2.2 (September 2008)
+        * Bit 12 is undefined in any of the following cases:
+        *  If the VM exit sets the valid bit in the IDT-vectoring
+        *   information field.
+        *  If the VM exit is due to a double fault.
+        */
+       if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+           vector != DF_VECTOR && !idtv_info_valid)
+           vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                         GUEST_INTR_STATE_NMI);
+       else
+           vmx->loaded_vmcs->nmi_known_unmasked =
+               !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+                 & GUEST_INTR_STATE_NMI);
+   } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+       vmx->loaded_vmcs->vnmi_blocked_time +=
+           ktime_to_ns(ktime_sub(ktime_get(),
+                                 vmx->loaded_vmcs->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -9247,6 +9304,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
    struct vcpu_vmx *vmx = to_vmx(vcpu);
    unsigned long debugctlmsr, cr3, cr4;

+   /* Record the guest's net vcpu time for enforced NMI injections. */
+   if (unlikely(!cpu_has_virtual_nmis() &&
+                vmx->loaded_vmcs->soft_vnmi_blocked))
+       vmx->loaded_vmcs->entry_time = ktime_get();
+
    /* Don't enter VMX if guest state is invalid, let the exit handler
       start emulation until we arrive back to a valid state */
    if (vmx->emulation_required)
@@ -11325,6 +11387,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
    vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
    vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
    vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+   vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+   vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

    /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
    if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)


@@ -896,7 +896,7 @@ EndTable
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb


@@ -333,6 +333,7 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
    del_timer_sync(&q->timeout);
+   cancel_work_sync(&q->timeout_work);

    if (q->mq_ops) {
        struct blk_mq_hw_ctx *hctx;
@@ -844,6 +845,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
    setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
                laptop_mode_timer_fn, (unsigned long) q);
    setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+   INIT_WORK(&q->timeout_work, NULL);
    INIT_LIST_HEAD(&q->queue_head);
    INIT_LIST_HEAD(&q->timeout_list);
    INIT_LIST_HEAD(&q->icq_list);


@@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
    struct request *rq, *tmp;
    int next_set = 0;

-   if (blk_queue_enter(q, true))
-       return;
    spin_lock_irqsave(q->queue_lock, flags);

    list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
        mod_timer(&q->timeout, round_jiffies_up(next));

    spin_unlock_irqrestore(q->queue_lock, flags);
-   blk_queue_exit(q);
}

/**


@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
#ifdef CONFIG_PM #ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock); static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev) void acpi_pm_wakeup_event(struct device *dev)
{ {
@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
if (!dev && !func) if (!dev && !func)
return AE_BAD_PARAMETER; return AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_lock); mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present) if (adev->wakeup.flags.notifier_present)
goto out; goto out;
adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY, status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL); acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
goto out; goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true; adev->wakeup.flags.notifier_present = true;
mutex_unlock(&acpi_pm_notifier_lock);
out: out:
mutex_unlock(&acpi_pm_notifier_lock); mutex_unlock(&acpi_pm_notifier_install_lock);
return status; return status;
} }
@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{ {
acpi_status status = AE_BAD_PARAMETER; acpi_status status = AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_lock); mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present) if (!adev->wakeup.flags.notifier_present)
goto out; goto out;
@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
goto out; goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL; adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL; adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws); wakeup_source_unregister(adev->wakeup.ws);
adev->wakeup.flags.notifier_present = false; adev->wakeup.flags.notifier_present = false;
mutex_unlock(&acpi_pm_notifier_lock);
out: out:
mutex_unlock(&acpi_pm_notifier_lock); mutex_unlock(&acpi_pm_notifier_install_lock);
return status; return status;
} }


@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{ {
if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
ec_log_drv("event unblocked"); ec_log_drv("event unblocked");
if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) /*
advance_transaction(ec); * Unconditionally invoke this once after enabling the event
* handling mechanism to detect the pending events.
*/
advance_transaction(ec);
} }
static inline void __acpi_ec_disable_event(struct acpi_ec *ec) static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (test_bit(EC_FLAGS_STARTED, &ec->flags) && if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1) ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true); acpi_ec_enable_gpe(ec, true);
/* EC is fully operational, allow queries */
acpi_ec_enable_event(ec);
} }
} }
/* EC is fully operational, allow queries */
acpi_ec_enable_event(ec);
return 0; return 0;
} }


@@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
        if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
            eflags |= ATA_EFLAG_DUBIOUS_XFER;
        ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+       trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
    }
-   trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);

    DPRINTK("EXIT\n");
}


@@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
            dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
                    ret);
            _dev_pm_opp_remove_table(opp_table, dev, false);
+           of_node_put(np);
            goto put_opp_table;
        }
    }


@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
cmd->status = BLK_STS_TIMEOUT; cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED; return BLK_EH_HANDLED;
} }
/* If we are waiting on our dead timer then we could get timeout
* callbacks for our request. For this we just want to reset the timer
* and let the queue side take care of everything.
*/
if (!completion_done(&cmd->send_complete)) {
nbd_config_put(nbd);
return BLK_EH_RESET_TIMER;
}
config = nbd->config; config = nbd->config;
if (config->num_connections > 1) { if (config->num_connections > 1) {
@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return 0; return 0;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags)) if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
return 0; return 0;
wait_event_interruptible_timeout(config->conn_wait, wait_event_timeout(config->conn_wait,
atomic_read(&config->live_connections), atomic_read(&config->live_connections),
config->dead_conn_timeout); config->dead_conn_timeout);
return atomic_read(&config->live_connections); return atomic_read(&config->live_connections);
} }
@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) { if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk), dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n"); "Socks array is empty\n");
blk_mq_start_request(req);
return -EINVAL; return -EINVAL;
} }
config = nbd->config; config = nbd->config;
@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk), dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n"); "Attempted send on invalid socket\n");
nbd_config_put(nbd); nbd_config_put(nbd);
blk_mq_start_request(req);
return -EINVAL; return -EINVAL;
} }
cmd->status = BLK_STS_OK; cmd->status = BLK_STS_OK;
@ -771,6 +764,7 @@ again:
*/ */
sock_shutdown(nbd); sock_shutdown(nbd);
nbd_config_put(nbd); nbd_config_put(nbd);
blk_mq_start_request(req);
return -EIO; return -EIO;
} }
goto again; goto again;
@ -781,6 +775,7 @@ again:
* here so that it gets put _after_ the request that is already on the * here so that it gets put _after_ the request that is already on the
* dispatch list. * dispatch list.
*/ */
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) { if (unlikely(nsock->pending && nsock->pending != req)) {
blk_mq_requeue_request(req, true); blk_mq_requeue_request(req, true);
ret = 0; ret = 0;
@ -793,10 +788,10 @@ again:
ret = nbd_send_cmd(nbd, cmd, index); ret = nbd_send_cmd(nbd, cmd, index);
if (ret == -EAGAIN) { if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk), dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed trying another connection\n"); "Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1); nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock); blk_mq_requeue_request(req, true);
goto again; ret = 0;
} }
out: out:
mutex_unlock(&nsock->tx_lock); mutex_unlock(&nsock->tx_lock);
@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* done sending everything over the wire. * done sending everything over the wire.
*/ */
init_completion(&cmd->send_complete); init_completion(&cmd->send_complete);
blk_mq_start_request(bd->rq);
/* We can be called directly from the user space process, which means we /* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In * could possibly have signals pending so our sendmsg will fail. In


@ -26,6 +26,7 @@
struct btqcomsmd { struct btqcomsmd {
struct hci_dev *hdev; struct hci_dev *hdev;
bdaddr_t bdaddr;
struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *acl_channel;
struct rpmsg_endpoint *cmd_channel; struct rpmsg_endpoint *cmd_channel;
}; };
@ -100,6 +101,38 @@ static int btqcomsmd_close(struct hci_dev *hdev)
return 0; return 0;
} }
static int btqcomsmd_setup(struct hci_dev *hdev)
{
struct btqcomsmd *btq = hci_get_drvdata(hdev);
struct sk_buff *skb;
int err;
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
kfree_skb(skb);
/* Devices do not have persistent storage for BD address. If no
* BD address has been retrieved during probe, mark the device
* as having an invalid BD address.
*/
if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
return 0;
}
/* When setting a configured BD address fails, mark the device
* as having an invalid BD address.
*/
err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
if (err) {
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
return 0;
}
return 0;
}
static int btqcomsmd_probe(struct platform_device *pdev) static int btqcomsmd_probe(struct platform_device *pdev)
{ {
struct btqcomsmd *btq; struct btqcomsmd *btq;
@ -135,6 +168,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->open = btqcomsmd_open; hdev->open = btqcomsmd_open;
hdev->close = btqcomsmd_close; hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send; hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
hdev->set_bdaddr = qca_set_bdaddr_rome; hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev); ret = hci_register_dev(hdev);


@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
        /* Get configuration for the ATL instances */
        snprintf(prop, sizeof(prop), "atl%u", i);
-       of_node_get(node);
-       cfg_node = of_find_node_by_name(node, prop);
+       cfg_node = of_get_child_by_name(node, prop);
        if (cfg_node) {
            ret = of_property_read_u32(cfg_node, "bws",
                                       &cdesc->bws);


@@ -344,6 +344,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
    struct inode *inode;

    dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+   if (!dax_dev)
+       return NULL;
+
    inode = &dax_dev->inode;
    inode->i_rdev = 0;
    return inode;


@ -1575,7 +1575,7 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work); param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num; param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0]; param->primary_path = &work->path[0];
if (req_msg->alt_local_lid) if (cm_req_has_alt_path(req_msg))
param->alternate_path = &work->path[1]; param->alternate_path = &work->path[1];
else else
param->alternate_path = NULL; param->alternate_path = NULL;
@ -1856,7 +1856,8 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
memset(&work->path[0], 0, sizeof(work->path[0])); memset(&work->path[0], 0, sizeof(work->path[0]));
memset(&work->path[1], 0, sizeof(work->path[1])); if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
ret = ib_get_cached_gid(work->port->cm_dev->ib_device, ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, work->port->port_num,
@ -3817,14 +3818,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct cm_port *port = mad_agent->context; struct cm_port *port = mad_agent->context;
struct cm_work *work; struct cm_work *work;
enum ib_cm_event_type event; enum ib_cm_event_type event;
bool alt_path = false;
u16 attr_id; u16 attr_id;
int paths = 0; int paths = 0;
int going_down = 0; int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID: case CM_REQ_ATTR_ID:
paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> alt_path = cm_req_has_alt_path((struct cm_req_msg *)
alt_local_lid != 0); mad_recv_wc->recv_buf.mad);
paths = 1 + (alt_path != 0);
event = IB_CM_REQ_RECEIVED; event = IB_CM_REQ_RECEIVED;
break; break;
case CM_MRA_ATTR_ID: case CM_MRA_ATTR_ID:


@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
    unsigned long flags;
    int ret;

+   INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
    ret = ib_mad_enforce_security(mad_agent_priv,
                                  mad_recv_wc->wc->pkey_index);
    if (ret) {
        ib_free_recv_mad(mad_recv_wc);
        deref_mad_agent(mad_agent_priv);
+       return;
    }

-   INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
    list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
    if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
        mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,


@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
if (ret) if (ret)
return ret; return ret;
if (qp_sec->qp == qp_sec->qp->real_qp) { list_for_each_entry(shared_qp_sec,
list_for_each_entry(shared_qp_sec, &qp_sec->shared_qp_list,
&qp_sec->shared_qp_list, shared_qp_list) {
shared_qp_list) { ret = security_ib_pkey_access(shared_qp_sec->security,
ret = security_ib_pkey_access(shared_qp_sec->security, subnet_prefix,
subnet_prefix, pkey);
pkey); if (ret)
if (ret) return ret;
return ret;
}
} }
return 0; return 0;
} }
@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
int ret = 0; int ret = 0;
struct ib_ports_pkeys *tmp_pps; struct ib_ports_pkeys *tmp_pps;
struct ib_ports_pkeys *new_pps; struct ib_ports_pkeys *new_pps;
bool special_qp = (qp->qp_type == IB_QPT_SMI || struct ib_qp *real_qp = qp->real_qp;
qp->qp_type == IB_QPT_GSI || bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
qp->qp_type >= IB_QPT_RESERVED1); real_qp->qp_type == IB_QPT_GSI ||
real_qp->qp_type >= IB_QPT_RESERVED1);
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH)); (qp_attr_mask & IB_QP_ALT_PATH));
/* The port/pkey settings are maintained only for the real QP. Open
* handles on the real QP will be in the shared_qp_list. When
* enforcing security on the real QP all the shared QPs will be
* checked as well.
*/
if (pps_change && !special_qp) { if (pps_change && !special_qp) {
mutex_lock(&qp->qp_sec->mutex); mutex_lock(&real_qp->qp_sec->mutex);
new_pps = get_new_pps(qp, new_pps = get_new_pps(real_qp,
qp_attr, qp_attr,
qp_attr_mask); qp_attr_mask);
@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (!ret) if (!ret)
ret = check_qp_port_pkey_settings(new_pps, ret = check_qp_port_pkey_settings(new_pps,
qp->qp_sec); real_qp->qp_sec);
} }
if (!ret) if (!ret)
ret = qp->device->modify_qp(qp->real_qp, ret = real_qp->device->modify_qp(real_qp,
qp_attr, qp_attr,
qp_attr_mask, qp_attr_mask,
udata); udata);
if (pps_change && !special_qp) { if (pps_change && !special_qp) {
/* Clean up the lists and free the appropriate /* Clean up the lists and free the appropriate
@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (ret) { if (ret) {
tmp_pps = new_pps; tmp_pps = new_pps;
} else { } else {
tmp_pps = qp->qp_sec->ports_pkeys; tmp_pps = real_qp->qp_sec->ports_pkeys;
qp->qp_sec->ports_pkeys = new_pps; real_qp->qp_sec->ports_pkeys = new_pps;
} }
if (tmp_pps) { if (tmp_pps) {
@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
port_pkey_list_remove(&tmp_pps->alt); port_pkey_list_remove(&tmp_pps->alt);
} }
kfree(tmp_pps); kfree(tmp_pps);
mutex_unlock(&qp->qp_sec->mutex); mutex_unlock(&real_qp->qp_sec->mutex);
} }
return ret; return ret;
} }


@ -13074,7 +13074,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
first_sdma = last_general; first_sdma = last_general;
last_sdma = first_sdma + dd->num_sdma; last_sdma = first_sdma + dd->num_sdma;
first_rx = last_sdma; first_rx = last_sdma;
last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
/* VNIC MSIx interrupts get mapped when VNIC contexts are created */ /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues; dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
@ -13294,8 +13294,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* slow source, SDMACleanupDone) * slow source, SDMACleanupDone)
* N interrupts - one per used SDMA engine * N interrupts - one per used SDMA engine
* M interrupt - one per kernel receive context * M interrupt - one per kernel receive context
* V interrupt - one for each VNIC context
*/ */
total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT; total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
/* ask for MSI-X interrupts */ /* ask for MSI-X interrupts */
request = request_msix(dd, total); request = request_msix(dd, total);
@ -13356,10 +13357,12 @@ fail:
* in array of contexts * in array of contexts
* freectxts - number of free user contexts * freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used * num_send_contexts - number of PIO send contexts being used
* num_vnic_contexts - number of contexts reserved for VNIC
*/ */
static int set_up_context_variables(struct hfi1_devdata *dd) static int set_up_context_variables(struct hfi1_devdata *dd)
{ {
unsigned long num_kernel_contexts; unsigned long num_kernel_contexts;
u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
int total_contexts; int total_contexts;
int ret; int ret;
unsigned ngroups; unsigned ngroups;
@ -13393,6 +13396,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts); num_kernel_contexts);
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1; num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
} }
/* Accommodate VNIC contexts if possible */
if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
dd_dev_err(dd, "No receive contexts available for VNIC\n");
num_vnic_contexts = 0;
}
total_contexts = num_kernel_contexts + num_vnic_contexts;
/* /*
* User contexts: * User contexts:
* - default to 1 user context per real (non-HT) CPU core if * - default to 1 user context per real (non-HT) CPU core if
@ -13402,19 +13413,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_user_contexts = num_user_contexts =
cpumask_weight(&node_affinity.real_cpu_mask); cpumask_weight(&node_affinity.real_cpu_mask);
total_contexts = num_kernel_contexts + num_user_contexts;
/* /*
* Adjust the counts given a global max. * Adjust the counts given a global max.
*/ */
if (total_contexts > dd->chip_rcv_contexts) { if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
dd_dev_err(dd, dd_dev_err(dd,
"Reducing # user receive contexts to: %d, from %d\n", "Reducing # user receive contexts to: %d, from %d\n",
(int)(dd->chip_rcv_contexts - num_kernel_contexts), (int)(dd->chip_rcv_contexts - total_contexts),
(int)num_user_contexts); (int)num_user_contexts);
num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
/* recalculate */ /* recalculate */
total_contexts = num_kernel_contexts + num_user_contexts; num_user_contexts = dd->chip_rcv_contexts - total_contexts;
} }
/* each user context requires an entry in the RMT */ /* each user context requires an entry in the RMT */
@ -13427,25 +13435,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
user_rmt_reduced); user_rmt_reduced);
/* recalculate */ /* recalculate */
num_user_contexts = user_rmt_reduced; num_user_contexts = user_rmt_reduced;
total_contexts = num_kernel_contexts + num_user_contexts;
} }
/* Accommodate VNIC contexts */ total_contexts += num_user_contexts;
if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
total_contexts += HFI1_NUM_VNIC_CTXT;
/* the first N are kernel contexts, the rest are user/vnic contexts */ /* the first N are kernel contexts, the rest are user/vnic contexts */
dd->num_rcv_contexts = total_contexts; dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts; dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts; dd->first_dyn_alloc_ctxt = num_kernel_contexts;
dd->num_vnic_contexts = num_vnic_contexts;
dd->num_user_contexts = num_user_contexts; dd->num_user_contexts = num_user_contexts;
dd->freectxts = num_user_contexts; dd->freectxts = num_user_contexts;
dd_dev_info(dd, dd_dev_info(dd,
"rcv contexts: chip %d, used %d (kernel %d, user %d)\n", "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
(int)dd->chip_rcv_contexts, (int)dd->chip_rcv_contexts,
(int)dd->num_rcv_contexts, (int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues, (int)dd->n_krcv_queues,
(int)dd->num_rcv_contexts - dd->n_krcv_queues); dd->num_vnic_contexts,
dd->num_user_contexts);
/* /*
* Receive array allocation: * Receive array allocation:


@ -1047,6 +1047,8 @@ struct hfi1_devdata {
u64 z_send_schedule; u64 z_send_schedule;
u64 __percpu *send_schedule; u64 __percpu *send_schedule;
/* number of reserved contexts for VNIC usage */
u16 num_vnic_contexts;
/* number of receive contexts in use by the driver */ /* number of receive contexts in use by the driver */
u32 num_rcv_contexts; u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */ /* number of pio send contexts in use by the driver */


@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
     * give a more accurate picture of total contexts available.
     */
    return scnprintf(buf, PAGE_SIZE, "%u\n",
-                    min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
+                    min(dd->num_user_contexts,
                        (u32)dd->sc_sizes[SC_USER].count));
}


@ -840,6 +840,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn; struct rdma_netdev *rn;
int i, size, rc; int i, size, rc;
if (!dd->num_vnic_contexts)
return ERR_PTR(-ENOMEM);
if (!port_num || (port_num > dd->num_pports)) if (!port_num || (port_num > dd->num_pports))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
@ -848,7 +851,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo); size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup, netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT); dd->chip_sdma_engines, dd->num_vnic_contexts);
if (!netdev) if (!netdev)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@ -856,7 +859,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
vinfo = opa_vnic_dev_priv(netdev); vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd; vinfo->dd = dd;
vinfo->num_tx_q = dd->chip_sdma_engines; vinfo->num_tx_q = dd->chip_sdma_engines;
vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT; vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev; vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn; rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id; rn->set_id = hfi1_vnic_set_vesw_id;


@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_rdma_ch *ch) static int srp_lookup_path(struct srp_rdma_ch *ch)
{ {
struct srp_target_port *target = ch->target; struct srp_target_port *target = ch->target;
int ret; int ret = -ENODEV;
ch->path.numb_path = 1; ch->path.numb_path = 1;
init_completion(&ch->done); init_completion(&ch->done);
/*
* Avoid that the SCSI host can be removed by srp_remove_target()
* before srp_path_rec_completion() is called.
*/
if (!scsi_host_get(target->scsi_host))
goto out;
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev, target->srp_host->srp_dev->dev,
target->srp_host->port, target->srp_host->port,
@ -684,18 +691,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
GFP_KERNEL, GFP_KERNEL,
srp_path_rec_completion, srp_path_rec_completion,
ch, &ch->path_query); ch, &ch->path_query);
if (ch->path_query_id < 0) ret = ch->path_query_id;
return ch->path_query_id; if (ret < 0)
goto put;
ret = wait_for_completion_interruptible(&ch->done); ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0) if (ret < 0)
return ret; goto put;
if (ch->status < 0) ret = ch->status;
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host, shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n"); PFX "Path record query failed\n");
return ch->status; put:
scsi_host_put(target->scsi_host);
out:
return ret;
} }
static int srp_send_req(struct srp_rdma_ch *ch, bool multich) static int srp_send_req(struct srp_rdma_ch *ch, bool multich)


@@ -2777,7 +2777,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
    const char *p;
    unsigned len, count, leading_zero_bytes;
-   int ret, rc;
+   int ret;

    p = name;
    if (strncasecmp(p, "0x", 2) == 0)
@@ -2789,10 +2789,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
    count = min(len / 2, 16U);
    leading_zero_bytes = 16 - count;
    memset(i_port_id, 0, leading_zero_bytes);
-   rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
-   if (rc < 0)
-       pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
-   ret = 0;
+   ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+   if (ret < 0)
+       pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
    return ret;
}


@@ -1071,18 +1071,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
    int nr_parts;
    struct partition_affinity *parts;

-   parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+   parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
    if (!parts_node)
        return;

    nr_parts = of_get_child_count(parts_node);
    if (!nr_parts)
-       return;
+       goto out_put_node;

    parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
    if (WARN_ON(!parts))
-       return;
+       goto out_put_node;

    for_each_child_of_node(parts_node, child_part) {
        struct partition_affinity *part;
@@ -1149,6 +1149,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)

        gic_data.ppi_descs[i] = desc;
    }
+
+out_put_node:
+   of_node_put(parts_node);
}

static void __init gic_of_setup_kvm_info(struct device_node *node)


@@ -1365,8 +1365,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
    /* Disable/inactivate ring */
    writel_relaxed(0x0, ring->regs + RING_CONTROL);

-   /* Flush ring with timeout of 1s */
-   timeout = 1000;
+   /* Set ring flush state */
+   timeout = 1000; /* timeout of 1s */
    writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
                   ring->regs + RING_CONTROL);
    do {
@@ -1374,7 +1374,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
                FLUSH_DONE_MASK)
            break;
        mdelay(1);
-   } while (timeout--);
+   } while (--timeout);
if (!timeout)
dev_err(ring->mbox->dev,
"setting ring%d flush state timedout\n", ring->num);
/* Clear ring flush state */
timeout = 1000; /* timeout of 1s */
writel_relaxed(0x0, ring + RING_CONTROL);
do {
if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
FLUSH_DONE_MASK))
break;
mdelay(1);
} while (--timeout);
if (!timeout)
dev_err(ring->mbox->dev,
"clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */ /* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {


@@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
    finish_wait(&ca->set->bucket_wait, &w);
out:
-   wake_up_process(ca->alloc_thread);
+   if (ca->alloc_thread)
+       wake_up_process(ca->alloc_thread);

    trace_bcache_alloc(ca, reserve);


@@ -625,7 +625,7 @@ re_read:
        err = read_sb_page(bitmap->mddev,
                           offset,
                           sb_page,
-                          0, PAGE_SIZE);
+                          0, sizeof(bitmap_super_t));
    }
    if (err)
        return err;
@@ -2123,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
    if (store.sb_page && bitmap->storage.sb_page)
        memcpy(page_address(store.sb_page),
               page_address(bitmap->storage.sb_page),
-              PAGE_SIZE);
+              sizeof(bitmap_super_t));
    bitmap_file_unmap(&bitmap->storage);
    bitmap->storage = store;


@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
buffers = c->minimum_buffers; buffers = c->minimum_buffers;
*limit_buffers = buffers; *limit_buffers = buffers;
*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100; *threshold_buffers = mult_frac(buffers,
DM_BUFIO_WRITEBACK_PERCENT, 100);
} }
/* /*
@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches); memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names); memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
mem = (__u64)((totalram_pages - totalhigh_pages) * mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT; DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
if (mem > ULONG_MAX) if (mem > ULONG_MAX)
mem = ULONG_MAX; mem = ULONG_MAX;
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
* Get the size of vmalloc space the same way as VMALLOC_TOTAL mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
* in fs/proc/internal.h
*/
if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif #endif
dm_bufio_default_cache_size = mem; dm_bufio_default_cache_size = mem;


@ -1201,6 +1201,18 @@ static void background_work_end(struct cache *cache)
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
return (bio_data_dir(bio) == WRITE) &&
(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
return writeback_mode(&cache->features) &&
(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}
static void quiesce(struct dm_cache_migration *mg, static void quiesce(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *)) void (*continuation)(struct work_struct *))
{ {
@ -1474,12 +1486,50 @@ static void mg_upgrade_lock(struct work_struct *ws)
} }
} }
static void mg_full_copy(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
struct cache *cache = mg->cache;
struct policy_work *op = mg->op;
bool is_policy_promote = (op->op == POLICY_PROMOTE);
if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
is_discarded_oblock(cache, op->oblock)) {
mg_upgrade_lock(ws);
return;
}
init_continuation(&mg->k, mg_upgrade_lock);
if (copy(mg, is_policy_promote)) {
DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
mg->k.input = BLK_STS_IOERR;
mg_complete(mg, false);
}
}
static void mg_copy(struct work_struct *ws) static void mg_copy(struct work_struct *ws)
{ {
int r;
struct dm_cache_migration *mg = ws_to_mg(ws); struct dm_cache_migration *mg = ws_to_mg(ws);
if (mg->overwrite_bio) { if (mg->overwrite_bio) {
/*
* No exclusive lock was held when we last checked if the bio
* was optimisable. So we have to check again in case things
* have changed (eg, the block may no longer be discarded).
*/
if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
/*
* Fallback to a real full copy after doing some tidying up.
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
mg_full_copy(ws);
return;
}
/* /*
* It's safe to do this here, even though it's new data * It's safe to do this here, even though it's new data
* because all IO has been locked out of the block. * because all IO has been locked out of the block.
@ -1489,26 +1539,8 @@ static void mg_copy(struct work_struct *ws)
*/ */
overwrite(mg, mg_update_metadata_after_copy); overwrite(mg, mg_update_metadata_after_copy);
} else { } else
struct cache *cache = mg->cache; mg_full_copy(ws);
struct policy_work *op = mg->op;
bool is_policy_promote = (op->op == POLICY_PROMOTE);
if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
is_discarded_oblock(cache, op->oblock)) {
mg_upgrade_lock(ws);
return;
}
init_continuation(&mg->k, mg_upgrade_lock);
r = copy(mg, is_policy_promote);
if (r) {
DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
mg->k.input = BLK_STS_IOERR;
mg_complete(mg, false);
}
}
} }
static int mg_lock_writes(struct dm_cache_migration *mg) static int mg_lock_writes(struct dm_cache_migration *mg)
@ -1748,18 +1780,6 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
/*----------------------------------------------------------------*/ /*----------------------------------------------------------------*/
static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
return (bio_data_dir(bio) == WRITE) &&
(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
return writeback_mode(&cache->features) &&
(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block, static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bool *commit_needed) bool *commit_needed)
{ {


@ -29,7 +29,6 @@ struct dm_kobject_holder {
* DM targets must _not_ deference a mapped_device to directly access its members! * DM targets must _not_ deference a mapped_device to directly access its members!
*/ */
struct mapped_device { struct mapped_device {
struct srcu_struct io_barrier;
struct mutex suspend_lock; struct mutex suspend_lock;
/* /*
@ -127,6 +126,8 @@ struct mapped_device {
struct blk_mq_tag_set *tag_set; struct blk_mq_tag_set *tag_set;
bool use_blk_mq:1; bool use_blk_mq:1;
bool init_tio_pdu:1; bool init_tio_pdu:1;
struct srcu_struct io_barrier;
}; };
void dm_init_md_queue(struct mapped_device *md); void dm_init_md_queue(struct mapped_device *md);


@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
    BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);

    /* Reject unexpected unaligned bio. */
-   if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+   if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
        return -EIO;

    dmreq = dmreq_of_req(cc, req);
@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
    int r = 0;

    /* Reject unexpected unaligned bio. */
-   if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+   if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
        return -EIO;

    dmreq = dmreq_of_req(cc, req);


@@ -1376,7 +1376,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
        struct bvec_iter iter;
        struct bio_vec bv;
        bio_for_each_segment(bv, bio, iter) {
-           if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+           if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
                      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
                return DM_MAPIO_KILL;


@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
    if (IS_ERR(clone)) {
        /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
        bool queue_dying = blk_queue_dying(q);
-       DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
-                   PTR_ERR(clone), queue_dying ? " (path offline)" : "");
        if (queue_dying) {
            atomic_inc(&m->pg_init_in_progress);
            activate_or_offline_path(pgpath);


@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
    return true;
}

-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
-                                  sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+                                      sector_t start, sector_t len, void *data)
{
    struct request_queue *q = bdev_get_queue(dev->bdev);

-   return q && blk_queue_discard(q);
+   return q && !blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
    struct dm_target *ti;
    unsigned i;

-   /*
-    * Unless any target used by the table set discards_supported,
-    * require at least one underlying device to support discards.
-    * t->devices includes internal dm devices such as mirror logs
-    * so we need to use iterate_devices here, which targets
-    * supporting discard selectively must provide.
-    */
    for (i = 0; i < dm_table_get_num_targets(t); i++) {
        ti = dm_table_get_target(t, i);

        if (!ti->num_discard_bios)
-           continue;
-
-       if (ti->discards_supported)
-           return true;
+           return false;

-       if (ti->type->iterate_devices &&
-           ti->type->iterate_devices(ti, device_discard_capable, NULL))
-           return true;
+       /*
+        * Either the target provides discard support (as implied by setting
+        * 'discards_supported') or it relies on _all_ data devices having
+        * discard support.
+        */
+       if (!ti->discards_supported &&
+           (!ti->type->iterate_devices ||
+            ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+           return false;
    }

-   return false;
+   return true;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,


@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
struct dmz_target *dmz = ti->private; struct dmz_target *dmz = ti->private;
struct request_queue *q; struct request_queue *q;
struct dmz_dev *dev; struct dmz_dev *dev;
sector_t aligned_capacity;
int ret; int ret;
/* Get the target device */ /* Get the target device */
@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
goto err; goto err;
} }
q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
if (ti->begin || (ti->len != dev->capacity)) { aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
if (ti->begin ||
((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported"; ti->error = "Partial mapping not supported";
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
q = bdev_get_queue(dev->bdev); dev->zone_nr_sectors = blk_queue_zone_sectors(q);
dev->zone_nr_sectors = q->limits.chunk_sectors;
dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors); dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors); dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data) iterate_devices_callout_fn fn, void *data)
{ {
struct dmz_target *dmz = ti->private; struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = dmz->dev;
sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data); return fn(ti, dmz->ddev, 0, capacity, data);
} }
static struct target_type dmz_type = { static struct target_type dmz_type = {


@ -1695,7 +1695,7 @@ static struct mapped_device *alloc_dev(int minor)
struct mapped_device *md; struct mapped_device *md;
void *old_md; void *old_md;
md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) { if (!md) {
DMWARN("unable to allocate device, out of memory."); DMWARN("unable to allocate device, out of memory.");
return NULL; return NULL;
@ -1795,7 +1795,7 @@ bad_io_barrier:
bad_minor: bad_minor:
module_put(THIS_MODULE); module_put(THIS_MODULE);
bad_module_get: bad_module_get:
kfree(md); kvfree(md);
return NULL; return NULL;
} }
@ -1814,7 +1814,7 @@ static void free_dev(struct mapped_device *md)
free_minor(minor); free_minor(minor);
module_put(THIS_MODULE); module_put(THIS_MODULE);
kfree(md); kvfree(md);
} }
static void __bind_mempools(struct mapped_device *md, struct dm_table *t) static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2709,11 +2709,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
    md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

-   if (test_bit(DMF_FREEING, &md->flags) ||
-       dm_deleting_md(md))
-       return NULL;
+   spin_lock(&_minor_lock);
+   if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+       md = NULL;
+       goto out;
+   }
    dm_get(md);
+out:
+   spin_unlock(&_minor_lock);

    return md;
}


@@ -8039,7 +8039,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
    if (did_change)
        sysfs_notify_dirent_safe(mddev->sysfs_state);
    wait_event(mddev->sb_wait,
-              !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+              !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+              mddev->suspended);
    if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
        percpu_ref_put(&mddev->writes_pending);
        return false;
@@ -8110,7 +8111,6 @@ void md_allow_write(struct mddev *mddev)
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        /* wait for the dirty state to be recorded in the metadata */
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
    } else
        spin_unlock(&mddev->lock);


@ -990,14 +990,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
_wait_barrier(conf, idx); _wait_barrier(conf, idx);
} }
static void wait_all_barriers(struct r1conf *conf)
{
int idx;
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
_wait_barrier(conf, idx);
}
static void _allow_barrier(struct r1conf *conf, int idx) static void _allow_barrier(struct r1conf *conf, int idx)
{ {
atomic_dec(&conf->nr_pending[idx]); atomic_dec(&conf->nr_pending[idx]);
@ -1011,14 +1003,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
_allow_barrier(conf, idx); _allow_barrier(conf, idx);
} }
static void allow_all_barriers(struct r1conf *conf)
{
int idx;
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
_allow_barrier(conf, idx);
}
/* conf->resync_lock should be held */ /* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf) static int get_unqueued_pending(struct r1conf *conf)
{ {
@ -1654,8 +1638,12 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf) static void close_sync(struct r1conf *conf)
{ {
wait_all_barriers(conf); int idx;
allow_all_barriers(conf);
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
_wait_barrier(conf, idx);
_allow_barrier(conf, idx);
}
mempool_destroy(conf->r1buf_pool); mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL; conf->r1buf_pool = NULL;


@ -194,7 +194,6 @@ struct venus_buffer {
* @fh: a holder of v4l file handle structure * @fh: a holder of v4l file handle structure
* @streamon_cap: stream on flag for capture queue * @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue * @streamon_out: stream on flag for output queue
* @cmd_stop: a flag to signal encoder/decoder commands
* @width: current capture width * @width: current capture width
* @height: current capture height * @height: current capture height
* @out_width: current output width * @out_width: current output width
@ -258,7 +257,6 @@ struct venus_inst {
} controls; } controls;
struct v4l2_fh fh; struct v4l2_fh fh;
unsigned int streamon_cap, streamon_out; unsigned int streamon_cap, streamon_out;
bool cmd_stop;
u32 width; u32 width;
u32 height; u32 height;
u32 out_width; u32 out_width;

View file

@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
mutex_lock(&inst->lock); mutex_lock(&inst->lock);
if (inst->cmd_stop) {
vbuf->flags |= V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
inst->cmd_stop = false;
goto unlock;
}
v4l2_m2m_buf_queue(m2m_ctx, vbuf); v4l2_m2m_buf_queue(m2m_ctx, vbuf);
if (!(inst->streamon_out & inst->streamon_cap)) if (!(inst->streamon_out & inst->streamon_cap))


@ -484,6 +484,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
return -EINVAL; return -EINVAL;
} }
EXPORT_SYMBOL_GPL(hfi_session_process_buf);
irqreturn_t hfi_isr_thread(int irq, void *dev_id) irqreturn_t hfi_isr_thread(int irq, void *dev_id)
{ {


@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
desc->attrs = DMA_ATTR_WRITE_COMBINE; desc->attrs = DMA_ATTR_WRITE_COMBINE;
desc->size = ALIGN(size, SZ_4K); desc->size = ALIGN(size, SZ_4K);
desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL, desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
desc->attrs); desc->attrs);
if (!desc->kva) if (!desc->kva)
return -ENOMEM; return -ENOMEM;
@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret) if (ret)
return ret; return ret;
hdev->ifaceq_table.kva = desc.kva; hdev->ifaceq_table = desc;
hdev->ifaceq_table.da = desc.da; offset = IFACEQ_TABLE_SIZE;
hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
offset = hdev->ifaceq_table.size;
for (i = 0; i < IFACEQ_NUM; i++) { for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i]; queue = &hdev->queues[i];
@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret) { if (ret) {
hdev->sfr.da = 0; hdev->sfr.da = 0;
} else { } else {
hdev->sfr.da = desc.da; hdev->sfr = desc;
hdev->sfr.kva = desc.kva;
hdev->sfr.size = ALIGNED_SFR_SIZE;
sfr = hdev->sfr.kva; sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE; sfr->buf_size = ALIGNED_SFR_SIZE;
} }


@@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
static int
vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
-   if (cmd->cmd != V4L2_DEC_CMD_STOP)
+   switch (cmd->cmd) {
+   case V4L2_DEC_CMD_STOP:
+       if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+           return -EINVAL;
+       break;
+   default:
        return -EINVAL;
+   }

    return 0;
}
@ -479,6 +485,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{ {
struct venus_inst *inst = to_inst(file); struct venus_inst *inst = to_inst(file);
struct hfi_frame_data fdata = {0};
int ret; int ret;
ret = vdec_try_decoder_cmd(file, fh, cmd); ret = vdec_try_decoder_cmd(file, fh, cmd);
@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
return ret; return ret;
mutex_lock(&inst->lock); mutex_lock(&inst->lock);
inst->cmd_stop = true;
/*
* Implement V4L2_DEC_CMD_STOP by enqueue an empty buffer on decoder
* input to signal EOS.
*/
if (!(inst->streamon_out & inst->streamon_cap))
goto unlock;
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.flags |= HFI_BUFFERFLAG_EOS;
fdata.device_addr = 0xdeadbeef;
ret = hfi_session_process_buf(inst, &fdata);
unlock:
mutex_unlock(&inst->lock); mutex_unlock(&inst->lock);
return ret;
hfi_session_flush(inst);
return 0;
} }
static const struct v4l2_ioctl_ops vdec_ioctl_ops = { static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
inst->reconfig = false; inst->reconfig = false;
inst->sequence_cap = 0; inst->sequence_cap = 0;
inst->sequence_out = 0; inst->sequence_out = 0;
inst->cmd_stop = false;
ret = vdec_init_session(inst); ret = vdec_init_session(inst);
if (ret) if (ret)
@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC; vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++; vbuf->sequence = inst->sequence_cap++;
if (inst->cmd_stop) {
vbuf->flags |= V4L2_BUF_FLAG_LAST;
inst->cmd_stop = false;
}
if (vbuf->flags & V4L2_BUF_FLAG_LAST) { if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };


@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
if (!vbuf) if (!vbuf)
return; return;
vb = &vbuf->vb2_buf;
vb->planes[0].bytesused = bytesused;
vb->planes[0].data_offset = data_offset;
vbuf->flags = flags; vbuf->flags = flags;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
vb = &vbuf->vb2_buf;
vb2_set_plane_payload(vb, 0, bytesused + data_offset);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC; vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++; vbuf->sequence = inst->sequence_cap++;
} else { } else {


@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->max_timeout) if (!dev->max_timeout)
return -ENOTTY; return -ENOTTY;
/* Check for multiply overflow */
if (val > U32_MAX / 1000)
return -EINVAL;
tmp = val * 1000; tmp = val * 1000;
if (tmp < dev->min_timeout || if (tmp < dev->min_timeout || tmp > dev->max_timeout)
tmp > dev->max_timeout) return -EINVAL;
return -EINVAL;
if (dev->s_timeout) if (dev->s_timeout)
ret = dev->s_timeout(dev, tmp); ret = dev->s_timeout(dev, tmp);
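The added guard is the standard pre-multiplication overflow check: before computing val * 1000, reject any val above U32_MAX / 1000. The same pattern in isolation (plain C, hypothetical helper name):

    #include <errno.h>
    #include <stdint.h>

    /* Convert milliseconds to microseconds, refusing values whose
     * multiplication by 1000 would wrap a 32-bit result. */
    static int ms_to_us_checked(uint32_t ms, uint32_t *us)
    {
            if (ms > UINT32_MAX / 1000)    /* ms * 1000 would overflow */
                    return -EINVAL;
            *us = ms * 1000;
            return 0;
    }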

View file

@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
data->state = STATE_BIT_PULSE; data->state = STATE_BIT_PULSE;
return 0; return 0;
} else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) { } else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
rc_repeat(dev);
IR_dprintk(1, "Repeat last key\n");
data->state = STATE_TRAILER_PULSE; data->state = STATE_TRAILER_PULSE;
return 0; return 0;
} }
@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2)) if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
break; break;
address = bitrev8((data->bits >> 24) & 0xff); if (data->count == NEC_NBITS) {
not_address = bitrev8((data->bits >> 16) & 0xff); address = bitrev8((data->bits >> 24) & 0xff);
command = bitrev8((data->bits >> 8) & 0xff); not_address = bitrev8((data->bits >> 16) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff); command = bitrev8((data->bits >> 8) & 0xff);
not_command = bitrev8((data->bits >> 0) & 0xff);
scancode = ir_nec_bytes_to_scancode(address, not_address, scancode = ir_nec_bytes_to_scancode(address,
command, not_command, not_address,
&rc_proto); command,
not_command,
&rc_proto);
if (data->is_nec_x) if (data->is_nec_x)
data->necx_repeat = true; data->necx_repeat = true;
rc_keydown(dev, rc_proto, scancode, 0);
} else {
rc_repeat(dev);
}
rc_keydown(dev, rc_proto, scancode, 0);
data->state = STATE_INACTIVE; data->state = STATE_INACTIVE;
return 0; return 0;
} }
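With this change a short NEC repeat frame (fewer than NEC_NBITS captured) only re-reports the previous key via rc_repeat(), while a full frame is decoded into a scancode. As a rough, hedged sketch of what that decode involves (the four bytes are assumed to be already bit-reversed with bitrev8(), since NEC transmits LSB first; the helper below is illustrative, not the kernel's ir_nec_bytes_to_scancode()):

    #include <stdint.h>

    /* addr/naddr and cmd/ncmd are the four decoded NEC bytes; which
     * protocol variant applies is decided from the complement checks. */
    static uint32_t nec_scancode(uint8_t addr, uint8_t naddr,
                                 uint8_t cmd, uint8_t ncmd)
    {
            if ((cmd ^ ncmd) != 0xff)           /* 32-bit modified variant */
                    return ((uint32_t)naddr << 24) | ((uint32_t)addr << 16) |
                           ((uint32_t)ncmd << 8) | cmd;
            if ((addr ^ naddr) != 0xff)         /* extended NEC */
                    return ((uint32_t)addr << 16) | ((uint32_t)naddr << 8) | cmd;
            return ((uint32_t)addr << 8) | cmd; /* plain NEC */
    }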

View file

@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd, unsigned char *cmd,
const struct firmware *firmware) { const struct firmware *firmware) {
struct as10x_fw_pkt_t fw_pkt; struct as10x_fw_pkt_t *fw_pkt;
int total_read_bytes = 0, errno = 0; int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0; unsigned char addr_has_changed = 0;
fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
if (!fw_pkt)
return -ENOMEM;
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0; int read_bytes = 0, data_len = 0;
/* parse intel hex line */ /* parse intel hex line */
read_bytes = parse_hex_line( read_bytes = parse_hex_line(
(u8 *) (firmware->data + total_read_bytes), (u8 *) (firmware->data + total_read_bytes),
fw_pkt.raw.address, fw_pkt->raw.address,
fw_pkt.raw.data, fw_pkt->raw.data,
&data_len, &data_len,
&addr_has_changed); &addr_has_changed);
@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
/* detect the end of file */ /* detect the end of file */
total_read_bytes += read_bytes; total_read_bytes += read_bytes;
if (total_read_bytes == firmware->size) { if (total_read_bytes == firmware->size) {
fw_pkt.u.request[0] = 0x00; fw_pkt->u.request[0] = 0x00;
fw_pkt.u.request[1] = 0x03; fw_pkt->u.request[1] = 0x03;
/* send EOF command */ /* send EOF command */
errno = bus_adap->ops->upload_fw_pkt(bus_adap, errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *) (uint8_t *)
&fw_pkt, 2, 0); fw_pkt, 2, 0);
if (errno < 0) if (errno < 0)
goto error; goto error;
} else { } else {
if (!addr_has_changed) { if (!addr_has_changed) {
/* prepare command to send */ /* prepare command to send */
fw_pkt.u.request[0] = 0x00; fw_pkt->u.request[0] = 0x00;
fw_pkt.u.request[1] = 0x01; fw_pkt->u.request[1] = 0x01;
data_len += sizeof(fw_pkt.u.request); data_len += sizeof(fw_pkt->u.request);
data_len += sizeof(fw_pkt.raw.address); data_len += sizeof(fw_pkt->raw.address);
/* send cmd to device */ /* send cmd to device */
errno = bus_adap->ops->upload_fw_pkt(bus_adap, errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *) (uint8_t *)
&fw_pkt, fw_pkt,
data_len, data_len,
0); 0);
if (errno < 0) if (errno < 0)
@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
} }
} }
error: error:
kfree(fw_pkt);
return (errno == 0) ? total_read_bytes : errno; return (errno == 0) ? total_read_bytes : errno;
} }
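The as102 change above moves struct as10x_fw_pkt_t off the kernel stack and onto the heap, the usual remedy when a local variable is too large for the fixed-size kernel stack. A minimal, hedged sketch of that shape of fix (the struct and names below are hypothetical):

    #include <linux/slab.h>
    #include <linux/types.h>

    struct big_pkt { u8 raw[1024]; };          /* hypothetical: too big for the stack */

    static int send_big_pkt(void)
    {
            struct big_pkt *pkt;
            int err = 0;

            pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);   /* heap instead of stack */
            if (!pkt)
                    return -ENOMEM;

            /* ... fill pkt->raw and hand it to the hardware ... */

            kfree(pkt);                                /* freed on every exit path */
            return err;
    }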

View file

@ -1684,7 +1684,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = dev->devno; nr = dev->devno;
assoc_desc = udev->actconfig->intf_assoc[0]; assoc_desc = udev->actconfig->intf_assoc[0];
if (assoc_desc->bFirstInterface != ifnum) { if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
dev_err(d, "Not found matching IAD interface\n"); dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV; retval = -ENODEV;
goto err_if; goto err_if;

View file

@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
} }
EXPORT_SYMBOL(v4l2_ctrl_fill); EXPORT_SYMBOL(v4l2_ctrl_fill);
static u32 user_flags(const struct v4l2_ctrl *ctrl)
{
u32 flags = ctrl->flags;
if (ctrl->is_ptr)
flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
return flags;
}
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{ {
memset(ev->reserved, 0, sizeof(ev->reserved)); memset(ev->reserved, 0, sizeof(ev->reserved));
@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->id = ctrl->id; ev->id = ctrl->id;
ev->u.ctrl.changes = changes; ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type; ev->u.ctrl.type = ctrl->type;
ev->u.ctrl.flags = ctrl->flags; ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr) if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0; ev->u.ctrl.value64 = 0;
else else
@ -2577,10 +2587,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
else else
qc->id = ctrl->id; qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name)); strlcpy(qc->name, ctrl->name, sizeof(qc->name));
qc->flags = ctrl->flags; qc->flags = user_flags(ctrl);
qc->type = ctrl->type; qc->type = ctrl->type;
if (ctrl->is_ptr)
qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
qc->elem_size = ctrl->elem_size; qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems; qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims; qc->nr_of_dims = ctrl->nr_of_dims;
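The new user_flags() helper makes the control-change event and VIDIOC_QUERY_EXT_CTRL agree on reporting V4L2_CTRL_FLAG_HAS_PAYLOAD for pointer-type controls. A hedged userspace sketch of where that flag is consumed (an open V4L2 fd and a control id are assumed):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Returns 1 if the control's value travels as a payload (pointer
     * types such as strings or arrays) rather than in value/value64,
     * 0 if not, -1 on error. */
    static int ctrl_has_payload(int fd, unsigned int id)
    {
            struct v4l2_query_ext_ctrl qc;

            memset(&qc, 0, sizeof(qc));
            qc.id = id;
            if (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc) < 0)
                    return -1;
            return !!(qc.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
    }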

View file

@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Avoton SoC", .name = "Avoton SoC",
.iTCO_version = 3, .iTCO_version = 3,
.gpio_version = AVOTON_GPIO, .gpio_version = AVOTON_GPIO,
.spi_type = INTEL_SPI_BYT,
}, },
[LPC_BAYTRAIL] = { [LPC_BAYTRAIL] = {
.name = "Bay Trail SoC", .name = "Bay Trail SoC",

View file

@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir; struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv; struct docg3 *docg3 = floor->priv;
if (IS_ERR_OR_NULL(root)) if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
dev_warn(floor->dev.parent,
"CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return; return;
}
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops); &flashcontrol_fops);

View file

@ -2547,6 +2547,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = { .driver = {
.name = "atmel-nand-controller", .name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids), .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
.pm = &atmel_nand_controller_pm_ops,
}, },
.probe = atmel_nand_controller_probe, .probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove, .remove = atmel_nand_controller_remove,

View file

@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE; op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE); dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) { if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0; ecc->sectors = 0;
complete(&ecc->done); complete(&ecc->done);
} else { } else {
@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
} }
} }
writel(0, ecc->regs + ECC_IRQ_REG(op));
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */ /* disable it */
mtk_ecc_wait_idle(ecc, op); mtk_ecc_wait_idle(ecc, op);
if (op == ECC_DECODE)
/*
* Clear the decode IRQ status in case the wait for the decode IRQ
* timed out.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
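Both mtk_ecc hunks add an extra read of ECC_DECIRQ_STA because the register is read-to-clear: a status bit left pending, for example after a timed-out wait, would immediately re-raise the interrupt. A generic, hedged sketch of the read-to-clear acknowledge pattern (register name and offset are placeholders):

    #include <linux/io.h>

    #define DEC_IRQ_STA 0x24        /* hypothetical register offset */

    /* Reading the status register is what acknowledges it, so one extra
     * read after handling (or after a timed-out wait) guarantees no
     * stale bit can re-fire the interrupt. */
    static void ack_decode_irq(void __iomem *regs)
    {
            readw(regs + DEC_IRQ_STA);
    }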

View file

@ -1246,6 +1246,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(nand_reset);
/** /**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@ -2799,15 +2800,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf) size_t *retlen, const uint8_t *buf)
{ {
struct nand_chip *chip = mtd_to_nand(mtd); struct nand_chip *chip = mtd_to_nand(mtd);
int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops; struct mtd_oob_ops ops;
int ret; int ret;
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
/* Grab the device */ /* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING); panic_nand_get_device(chip, mtd, FL_WRITING);
chip->select_chip(mtd, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
memset(&ops, 0, sizeof(ops)); memset(&ops, 0, sizeof(ops));
ops.len = len; ops.len = len;
ops.datbuf = (uint8_t *)buf; ops.datbuf = (uint8_t *)buf;
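The panic-write fix needs the die index before it can select and poll the right chip; with chip_shift being log2 of one die's size, that index is just the offset shifted right. A small illustration in plain C (names are illustrative):

    #include <stdint.h>

    /* chip_shift = log2(size of one die); the die index for an absolute
     * offset is the offset with the in-die address bits shifted away. */
    static int die_for_offset(uint64_t offset, unsigned int chip_shift)
    {
            return (int)(offset >> chip_shift);
    }

For example, with 1 GiB dies (chip_shift = 30) an offset of 0x50000000 falls on die 1, which is the chip that must be selected before waiting for ready.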

View file

@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent; struct dentry *dent;
if (!IS_ENABLED(CONFIG_DEBUG_FS)) /*
* Just skip debugfs initialization when the debugfs directory is
* missing.
*/
if (IS_ERR_OR_NULL(root)) {
if (IS_ENABLED(CONFIG_DEBUG_FS) &&
!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0; return 0;
}
if (IS_ERR_OR_NULL(root))
return -1;
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops); root, dev, &dfs_fops);

View file

@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5}; 0x97, 0x79, 0xe5, 0x24, 0xb5};
/** /**
* omap_calculate_ecc_bch - Generate bytes of ECC bytes * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure * @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed * @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer * @ecc_code: The ecc_code buffer
* @i: The sector number (for a multi sector page)
* *
* Support calculating of BCH4/8 ecc vectors for the page * Support calculating of BCH4/8/16 ECC vectors for one sector
* within a page. Sector number is in @i.
*/ */
static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc) const u_char *dat, u_char *ecc_calc, int i)
{ {
struct omap_nand_info *info = mtd_to_omap(mtd); struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes; int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg; struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code; u8 *ecc_code;
unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val; u32 val;
int i, j; int j;
ecc_code = ecc_calc;
switch (info->ecc_opt) {
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
return 0;
}
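The per-scheme "constant polynomial" step kept in the refactored helper above is a byte-wise XOR over the sector's ECC bytes; since XOR is its own inverse, the same constant can be folded out on the read path, and it is chosen so that a never-programmed (all-0xFF) page still verifies cleanly. A hedged sketch of that post-processing step on its own:

    #include <stddef.h>
    #include <stdint.h>

    /* Fold a fixed per-scheme constant into the freshly computed ECC.
     * XOR is involutory, so the read path can strip it off again, and
     * the constant is picked so an erased (all-0xFF) page checks clean. */
    static void ecc_add_constant(uint8_t *ecc, const uint8_t *poly, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    ecc[i] ^= poly[i];
    }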
/**
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
* when SW based correction is required as ECC is required for one sector
* at a time.
*/
static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
}
/**
* omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
*/
static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
unsigned long nsectors;
int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) { for (i = 0; i < nsectors; i++) {
ecc_code = ecc_calc; ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
switch (info->ecc_opt) { if (ret)
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: return ret;
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */ ecc_calc += eccbytes;
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
ecc_calc += eccbytes;
} }
return 0; return 0;
@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize); chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */ /* Update ecc vector from GPMC result registers */
chip->ecc.calculate(mtd, buf, &ecc_calc[0]); omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total); chip->ecc.total);
@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
return 0; return 0;
} }
/**
* omap_write_subpage_bch - BCH hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* OMAP optimized subpage write method.
*/
static int omap_write_subpage_bch(struct mtd_info *mtd,
struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
u8 *ecc_calc = chip->buffers->ecccalc;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
u32 start_step = offset / ecc_size;
u32 end_step = (offset + data_len - 1) / ecc_size;
int step, ret = 0;
/*
* Write the entire page in one go, which is optimal since the
* ECC is calculated by hardware.
* ECC is computed for all subpages, but only the wanted ones
* are kept.
*/
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* Write data */
chip->write_buf(mtd, buf, mtd->writesize);
for (step = 0; step < ecc_steps; step++) {
/* mask ECC of un-touched subpages by padding 0xFF */
if (step < start_step || step > end_step)
memset(ecc_calc, 0xff, ecc_bytes);
else
ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
if (ret)
return ret;
buf += ecc_size;
ecc_calc += ecc_bytes;
}
/* copy calculated ECC for whole page to chip->buffer->oob */
/* this includes the masked value (0xFF) for unwritten subpages */
ecc_calc = chip->buffers->ecccalc;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
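omap_write_subpage_bch() above leans on the controller computing ECC for every sector of the page anyway: sectors outside the requested subpage range simply get their ECC bytes padded with 0xFF (the erased-OOB value) before the OOB area is written. The masking loop in isolation, as a hedged plain-C sketch (sizes are hypothetical):

    #include <stdint.h>
    #include <string.h>

    /* For a partial-page write touching sectors [first, last], keep the
     * computed ECC only for those sectors and pad the rest with 0xFF so
     * the untouched OOB area stays in its erased state. */
    static void mask_unwritten_ecc(uint8_t *ecc, int steps, int ecc_bytes,
                                   int first, int last)
    {
            for (int step = 0; step < steps; step++) {
                    if (step < first || step > last)
                            memset(ecc + step * ecc_bytes, 0xff, ecc_bytes);
            }
    }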
/** /**
* omap_read_page_bch - BCH ecc based page read function for entire page * omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure * @mtd: mtd info structure
@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total); chip->ecc.total);
/* Calculate ecc bytes */ /* Calculate ecc bytes */
chip->ecc.calculate(mtd, buf, ecc_calc); omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total); chip->ecc.total);
@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4; nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */ /* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1; oobbytes_per_step = nand_chip->ecc.bytes + 1;
@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4; nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;
@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8; nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */ /* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1; oobbytes_per_step = nand_chip->ecc.bytes + 1;
@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8; nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;
@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16; nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;

View file

@ -422,7 +422,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
if (ret < 0) if (ret < 0)
return ret; return ret;
val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT; val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO; val |= SSFSTS_CTL_SCGO;
@ -432,7 +432,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
if (ret) if (ret)
return ret; return ret;
status = readl(ispi->base + SSFSTS_CTL); status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR) if (status & SSFSTS_CTL_FCERR)
return -EIO; return -EIO;
else if (status & SSFSTS_CTL_AEL) else if (status & SSFSTS_CTL_AEL)
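The first intel-spi hunk corrects the Data Byte Count encoding: like many hardware count fields, DBC holds the transfer length minus one, so moving len bytes means programming len - 1 (the second hunk separately reads the status back from the software-sequencer register block, ispi->sregs, rather than the base block). A hedged illustration of the N-1 encoding (shift and mask values are placeholders):

    #include <stdint.h>

    #define DBC_SHIFT 16                    /* placeholder field position */
    #define DBC_MASK  (0x3fu << DBC_SHIFT)  /* placeholder field width */

    /* The hardware counts bytes as N-1: 1 byte -> 0, 64 bytes -> 63. */
    static uint32_t encode_byte_count(uint32_t len)
    {
            return ((len - 1) << DBC_SHIFT) & DBC_MASK;
    }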

View file

@ -398,6 +398,7 @@
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
/* If this bit asserted, the driver should claim the interrupt */ /* If this bit asserted, the driver should claim the interrupt */

View file

@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
* Checks to see of the link status of the hardware has changed. If a * Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers * change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists. * to get the current speed/duplex if link exists.
*
* Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
* up).
**/ **/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw) s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{ {
@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt. * Change or Rx Sequence Error interrupt.
*/ */
if (!mac->get_link_status) if (!mac->get_link_status)
return 0; return 1;
/* First we want to see if the MII Status Register reports /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex * link. If so, then we want to get the current speed/duplex
@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner. * different link partner.
*/ */
ret_val = e1000e_config_fc_after_link_up(hw); ret_val = e1000e_config_fc_after_link_up(hw);
if (ret_val) if (ret_val) {
e_dbg("Error configuring flow control\n"); e_dbg("Error configuring flow control\n");
return ret_val;
}
return ret_val; return 1;
} }
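After this hunk e1000e_check_for_copper_link() follows a three-way contract: a negative -E1000_ERR_* value on failure, 0 when there is nothing to do or no link, and 1 when link is up, which is exactly what the later `link_active = ret_val > 0` change consumes. A plain-C sketch of handling such a tri-state return (names are illustrative):

    /* check_link() contract: <0 on error, 0 for link down, 1 for link up. */
    static int update_link_state(int (*check_link)(void), int *link_active)
    {
            int ret = check_link();

            if (ret < 0)
                    return ret;          /* propagate the error code */
            *link_active = (ret > 0);    /* 1 means link is up */
            return 0;
    }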
/** /**

View file

@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
struct net_device *netdev = data; struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
u32 icr;
bool enable = true;
hw->mac.get_link_status = true; icr = er32(ICR);
if (icr & E1000_ICR_RXO) {
/* guard against interrupt when we're going down */ ew32(ICR, E1000_ICR_RXO);
if (!test_bit(__E1000_DOWN, &adapter->state)) { enable = false;
mod_timer(&adapter->watchdog_timer, jiffies + 1); /* napi poll will re-enable Other, make sure it runs */
ew32(IMS, E1000_IMS_OTHER); if (napi_schedule_prep(&adapter->napi)) {
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
__napi_schedule(&adapter->napi);
}
} }
if (icr & E1000_ICR_LSC) {
ew32(ICR, E1000_ICR_LSC);
hw->mac.get_link_status = true;
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
if (enable && !test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, E1000_IMS_OTHER);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
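The reworked MSI-X "other" handler keeps the Other interrupt masked after a receiver overrun and defers the cleanup to NAPI; napi_schedule_prep() plus __napi_schedule() is the standard way to do that from hard-IRQ context. A hedged kernel-style sketch of the pattern (the driver structure is hypothetical):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    struct my_adapter {                    /* hypothetical driver state */
            struct napi_struct napi;
    };

    static irqreturn_t my_isr(int irq, void *data)
    {
            struct my_adapter *ad = data;

            /* napi_schedule_prep() fails if the poll is already queued or
             * running, so each poll cycle is scheduled at most once; the
             * interrupt stays masked until the poll routine re-enables it. */
            if (napi_schedule_prep(&ad->napi))
                    __napi_schedule(&ad->napi);

            return IRQ_HANDLED;
    }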
@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
napi_complete_done(napi, work_done); napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) { if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries) if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val); ew32(IMS, adapter->rx_ring->ims_val |
E1000_IMS_OTHER);
else else
e1000_irq_enable(adapter); e1000_irq_enable(adapter);
} }
@ -3004,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
hw->mac.ops.config_collision_dist(hw); hw->mac.ops.config_collision_dist(hw);
/* SPT and CNP Si errata workaround to avoid data corruption */ /* SPT and KBL Si errata workaround to avoid data corruption */
if (hw->mac.type >= e1000_pch_spt) { if (hw->mac.type == e1000_pch_spt) {
u32 reg_val; u32 reg_val;
reg_val = er32(IOSFPC); reg_val = er32(IOSFPC);
@ -3013,7 +3030,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(IOSFPC, reg_val); ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0)); reg_val = er32(TARC(0));
reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; /* SPT and KBL Si errata workaround to avoid Tx hang */
reg_val &= ~BIT(28);
reg_val |= BIT(29);
ew32(TARC(0), reg_val); ew32(TARC(0), reg_val);
} }
} }
@ -4204,7 +4223,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries) if (adapter->msix_entries)
ew32(ICS, E1000_ICS_OTHER); ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
else else
ew32(ICS, E1000_ICS_LSC); ew32(ICS, E1000_ICS_LSC);
} }
@ -5081,7 +5100,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
case e1000_media_type_copper: case e1000_media_type_copper:
if (hw->mac.get_link_status) { if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw); ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status; link_active = ret_val > 0;
} else { } else {
link_active = true; link_active = true;
} }
@ -5099,7 +5118,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
break; break;
} }
if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
(er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kmrn_lock_loss_workaround_ich8lan() */ /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
e_info("Gigabit has been disabled, downgrading speed\n"); e_info("Gigabit has been disabled, downgrading speed\n");

View file

@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
s32 ret_val = 0; s32 ret_val = 0;
u16 i, phy_status; u16 i, phy_status;
*success = false;
for (i = 0; i < iterations; i++) { for (i = 0; i < iterations; i++) {
/* Some PHYs require the MII_BMSR register to be read /* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing * twice due to the link bit being sticky. No harm doing
@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val) if (ret_val)
break; break;
if (phy_status & BMSR_LSTATUS) if (phy_status & BMSR_LSTATUS) {
*success = true;
break; break;
}
if (usec_interval >= 1000) if (usec_interval >= 1000)
msleep(usec_interval / 1000); msleep(usec_interval / 1000);
else else
udelay(usec_interval); udelay(usec_interval);
} }
*success = (i < iterations);
return ret_val; return ret_val;
} }

View file

@ -1229,7 +1229,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break; break;
/* prevent any other reads prior to eop_desc */ /* prevent any other reads prior to eop_desc */
read_barrier_depends(); smp_rmb();
/* if DD is not set pending work has not been completed */ /* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))

View file

@ -3760,7 +3760,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break; break;
/* prevent any other reads prior to eop_desc */ /* prevent any other reads prior to eop_desc */
read_barrier_depends(); smp_rmb();
/* if the descriptor isn't done, no work yet to do */ /* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz & if (!(eop_desc->cmd_type_offset_bsz &

View file

@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break; break;
/* prevent any other reads prior to eop_desc */ /* prevent any other reads prior to eop_desc */
read_barrier_depends(); smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */ /* we have caught up to head, no work left to do */
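The read_barrier_depends() to smp_rmb() conversions above (fm10k and both i40e Tx clean paths) all guard the same idiom: the CPU must observe the descriptor's done flag before it loads any other descriptor fields written by the device. A hedged sketch of that ordering requirement (the descriptor layout is hypothetical):

    #include <linux/types.h>
    #include <asm/barrier.h>

    struct tx_desc {                       /* hypothetical DMA descriptor */
            u32 flags;                     /* device sets DESC_DONE last */
            u32 bytecount;
    };
    #define DESC_DONE 0x1

    static int desc_completed(volatile struct tx_desc *d, u32 *bytes)
    {
            if (!(d->flags & DESC_DONE))
                    return 0;
            /* Order the flag check before the payload load: without the
             * read barrier the CPU could use a bytecount value fetched
             * before the device finished writing the descriptor. */
            smp_rmb();
            *bytes = d->bytecount;
            return 1;
    }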

Some files were not shown because too many files have changed in this diff