updating to mainline 4.14.0-rc7

Jake Day 2017-10-30 10:17:04 -04:00
parent aa26187b8c
commit bcab2fb28d
203 changed files with 1943 additions and 1854 deletions

View file

@ -211,7 +211,9 @@ Description:
device, after it has been suspended at run time, from a resume
request to the moment the device will be ready to process I/O,
in microseconds. If it is equal to 0, however, this means that
the PM QoS resume latency may be arbitrary.
the PM QoS resume latency may be arbitrary and the special value
"n/a" means that user space cannot accept any resume latency at
all for the given device.
Not all drivers support this attribute. If it isn't supported,
it is not present.
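A minimal user-space sketch of the resulting ABI (the sysfs path is abbreviated and purely illustrative, and the helper name is made up for the example; this is not part of the patch):

	/* Hedged example: set a 100 us resume latency limit, then declare that
	 * no resume latency is acceptable by writing "n/a". The
	 * ".../power/pm_qos_resume_latency_us" path must be filled in with a
	 * real device directory. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int pm_qos_write(const char *attr, const char *val)
	{
		int fd = open(attr, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	/* pm_qos_write(".../power/pm_qos_resume_latency_us", "100"); */
	/* pm_qos_write(".../power/pm_qos_resume_latency_us", "n/a"); */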

View file

@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
ld
Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
objcopy
Copy binary. Uses OBJCOPYFLAGS usually specified in
arch/$(ARCH)/Makefile.
OBJCOPYFLAGS_$@ may be used to set additional options.
gzip
Compress target. Use maximum compression to compress target.
Example:
#arch/x86/boot/Makefile
LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
resulting in the target file being recompiled for no
obvious reason.
objcopy
Copy binary. Uses OBJCOPYFLAGS usually specified in
arch/$(ARCH)/Makefile.
OBJCOPYFLAGS_$@ may be used to set additional options.
gzip
Compress target. Use maximum compression to compress target.
Example:
#arch/x86/boot/compressed/Makefile
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
dtc
Create flattened device tree blob object suitable for linking
into vmlinux. Device tree blobs linked into vmlinux are placed
@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
that may be shared between individual architectures.
The recommended approach to using a generic header file is
to list the file in the Kbuild file.
See "7.3 generic-y" for further info on syntax etc.
See "7.2 generic-y" for further info on syntax etc.
--- 6.11 Post-link pass
@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
arch/<arch>/include/asm/ to list asm files coming from asm-generic.
See subsequent chapter for the syntax of the Kbuild file.
--- 7.1 no-export-headers
--- 7.1 no-export-headers
no-export-headers is essentially used by include/uapi/linux/Kbuild to
avoid exporting specific headers (e.g. kvm.h) on architectures that do
not support it. It should be avoided as much as possible.
--- 7.2 generic-y
--- 7.2 generic-y
If an architecture uses a verbatim copy of a header from
include/asm-generic then this is listed in the file
@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
Example: termios.h
#include <asm-generic/termios.h>
--- 7.3 generated-y
--- 7.3 generated-y
If an architecture generates other header files alongside generic-y
wrappers, generated-y specifies them.
@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
#arch/x86/include/asm/Kbuild
generated-y += syscalls_32.h
--- 7.5 mandatory-y
--- 7.4 mandatory-y
mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
to define the minimum set of headers that must be exported in

View file

@ -10179,7 +10179,6 @@ F: Documentation/parport*.txt
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
M: Chris Wright <chrisw@sous-sol.org>
M: Alok Kataria <akataria@vmware.com>
M: Rusty Russell <rusty@rustcorp.com.au>
L: virtualization@lists.linux-foundation.org

View file

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@ -130,8 +130,8 @@ endif
ifneq ($(KBUILD_OUTPUT),)
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT))
KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT))
KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
&& /bin/pwd)
$(if $(KBUILD_OUTPUT),, \
$(error failed to create output directory "$(saved-output)"))
@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@ -1399,7 +1399,7 @@ help:
@echo ' Build, install, and boot kernel before'
@echo ' running kselftest on it'
@echo ' kselftest-clean - Remove all generated kselftest files'
@echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed'
@echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
@echo ' .config.'
@echo ''
@echo 'Userspace tools targets:'

View file

@ -181,10 +181,10 @@ alcor_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
static int __init
static int
alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSEL 17 is XLT only */
{16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */

View file

@ -173,10 +173,10 @@ pc164_init_irq(void)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int __init
static inline int
eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
static inline int __init
static inline int
cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
@ -287,10 +287,10 @@ cia_cab_init_pci(void)
*
*/
static inline int __init
static inline int
alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */

View file

@ -356,7 +356,7 @@ clipper_init_irq(void)
* 10 64 bit PCI option slot 3 (not bus 0)
*/
static int __init
static int
isa_irq_fixup(const struct pci_dev *dev, int irq)
{
u8 irq8;
@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
return irq8 & 0xf;
}
static int __init
static int
dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[6][5] __initdata = {
static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, irq);
}
static int __init
static int
monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[13][5] __initdata = {
static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
{ -1, -1, -1, -1, -1}, /* IdSel 4 unused */
@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static u8 __init
static u8
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
return slot;
}
static int __init
static int
webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[13][5] __initdata = {
static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
{ -1, -1, -1, -1, -1}, /* IdSel 8 unused */
@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
static int __init
static int
clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[7][5] __initdata = {
static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
{ 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */

View file

@ -167,10 +167,10 @@ eb64p_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
static int __init
static int
eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
{16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */

View file

@ -141,7 +141,7 @@ eiger_init_irq(void)
}
}
static int __init
static int
eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u8 irq_orig;
@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq_orig - 0x80;
}
static u8 __init
static u8
eiger_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;

View file

@ -149,10 +149,10 @@ miata_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
static int __init
static int
miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[18][5] __initdata = {
static char irq_tab[18][5] = {
/*INT INTA INTB INTC INTD */
{16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
{ -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
static u8 __init
static u8
miata_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;

View file

@ -145,10 +145,10 @@ mikasa_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
static int __init
static int
mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[8][5] __initdata = {
static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */

View file

@ -62,7 +62,7 @@ nautilus_init_irq(void)
common_init_isa_dma();
}
static int __init
static int
nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/* Preserve the IRQ set up by the console. */

View file

@ -193,10 +193,10 @@ noritake_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
static int __init
static int
noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[15][5] __initdata = {
static char irq_tab[15][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSELs 16, 17, and 25 are CORELLE only */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
static u8 __init
static u8
noritake_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;

View file

@ -221,10 +221,10 @@ rawhide_init_irq(void)
*
*/
static int __init
static int
rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
{ 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */

View file

@ -117,10 +117,10 @@ ruffian_kill_arch (int mode)
*
*/
static int __init
static int
ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[11][5] __initdata = {
static char irq_tab[11][5] = {
/*INT INTA INTB INTC INTD */
{-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
{-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
static u8 __init
static u8
ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;

View file

@ -142,7 +142,7 @@ rx164_init_irq(void)
*
*/
static int __init
static int
rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if 0
@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{ 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
};
#else
static char irq_tab[6][5] __initdata = {
static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
{ 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */

View file

@ -192,10 +192,10 @@ sable_init_irq(void)
* with the values in the irq swizzling tables above.
*/
static int __init
static int
sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[9][5] __initdata = {
static char irq_tab[9][5] = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
@ -374,10 +374,10 @@ lynx_init_irq(void)
* with the values in the irq swizzling tables above.
*/
static int __init
static int
lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[19][5] __initdata = {
static char irq_tab[19][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
static u8 __init
static u8
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;

View file

@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
outb((level_bits >> 8) & 0xff, 0x4d1);
}
static inline int __init
static inline int
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* that they use the default INTA line, if they are interrupt
* driven at all).
*/
static char irq_tab[][5] __initdata = {
static char irq_tab[][5] = {
/*INT A B C D */
{ 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq >= 0 ? tmp : -1;
}
static inline int __init
static inline int
p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[][5] __initdata = {
static char irq_tab[][5] = {
/*INT A B C D */
{ 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */

View file

@ -94,10 +94,10 @@ sx164_init_irq(void)
* 9 32 bit PCI option slot 3
*/
static int __init
static int
sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */

View file

@ -155,10 +155,10 @@ takara_init_irq(void)
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
static int __init
static int
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[15][5] __initdata = {
static char irq_tab[15][5] = {
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
static u8 __init
static u8
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot = PCI_SLOT(dev->devfn);

View file

@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector)
* 7 64 bit PCI 1 option slot 7
*/
static int __init
static int
wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[8][5] __initdata = {
static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
{ 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */

View file

@ -137,14 +137,15 @@
/*
* DW sdio controller has external ciu clock divider
* controlled via register in SDIO IP. Due to its
* unexpected default value (it should devide by 1
* but it devides by 8) SDIO IP uses wrong clock and
* unexpected default value (it should divide by 1
* but it divides by 8) SDIO IP uses wrong clock and
* works unstable (see STAR 9001204800)
* We switched to the minimum possible value of the
* divisor (div-by-2) in HSDK platform code.
* So add temporary fix and change clock frequency
* from 100000000 to 12500000 Hz until we fix dw sdio
* driver itself.
* to 50000000 Hz until we fix dw sdio driver itself.
*/
clock-frequency = <12500000>;
clock-frequency = <50000000>;
#clock-cells = <0>;
};
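For reference, the new value follows directly from the divider change described in the comment above (assuming the 100000000 Hz source clock implied by the removed comment): the default div-by-8 setting gave 100000000 / 8 = 12500000 Hz, while the div-by-2 value now programmed by the HSDK platform code gives 100000000 / 2 = 50000000 Hz, which is exactly the updated clock-frequency property.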

View file

@ -63,7 +63,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_RESET_HSDK=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y

View file

@ -23,6 +23,8 @@
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@ -30,6 +32,9 @@
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif
struct plat_smp_ops __weak plat_smp_ops;

View file

@ -8,3 +8,4 @@
menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
select CLK_HSDK
select RESET_HSDK

View file

@ -74,6 +74,10 @@ static void __init hsdk_set_cpu_freq_1ghz(void)
pr_err("Failed to setup CPU frequency to 1GHz!");
}
#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
static void __init hsdk_init_early(void)
{
/*
@ -89,6 +93,12 @@ static void __init hsdk_init_early(void)
/* Really apply settings made above */
writel(1, (void __iomem *) CREG_PAE_UPDATE);
/*
* Switch SDIO external ciu clock divider from default div-by-8 to
* minimum possible div-by-2.
*/
iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
/*
* Setup CPU frequency to 1GHz.
* TODO: remove it after smart hsdk pll driver will be introduced.

View file

@ -1,7 +1,7 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

View file

@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
idx = srcu_read_lock(&vcpu->kvm->srcu);
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
return H_PARAMETER;
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
ret = H_PARAMETER;
goto unlock_exit;
}
entry = ioba >> stt->page_shift;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE) {
if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
stit->tbl, entry);
} else {
idx = srcu_read_lock(&vcpu->kvm->srcu);
else
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
entry, ua, dir);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
if (ret == H_SUCCESS)
continue;
if (ret == H_TOO_HARD)
return ret;
goto unlock_exit;
WARN_ON_ONCE(1);
kvmppc_clear_tce(stit->tbl, entry);
@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
kvmppc_tce_put(stt, entry, tce);
return H_SUCCESS;
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

View file

@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
stdcix r11,r9,r10
eieio
stdcix r11,r9,r10
lwz r11, VCPU_XIVE_CAM_WORD(r4)
li r9, TM_QW1_OS + TM_WORD2
stwcix r11,r9,r10
li r9, 1
stw r9, VCPU_XIVE_PUSHED(r4)
eieio
no_xive:
#endif /* CONFIG_KVM_XICS */
@ -1310,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 3f
BEGIN_FTR_SECTION
PPC_MSGSYNC
lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
@ -1400,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
lwzx r11, r7, r10
eieio
lwzx r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldx r11, r6, r10
b 3f
@ -1409,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
lwzcix r11, r7, r10
eieio
lwzcix r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldcix r11, r6, r10
3: std r11, VCPU_XIVE_SAVED_STATE(r9)
@ -1420,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r10, VCPU_XIVE_PUSHED(r9)
stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
eieio
1:
#endif /* CONFIG_KVM_XICS */
/* Save more register state */
@ -2788,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
BEGIN_FTR_SECTION
PPC_MSGSYNC
lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr

View file

@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
r = cpu_has_feature(CPU_FTR_TM_COMP) &&
is_kvmppc_hv_enabled(kvm);
r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
break;
default:
r = 0;

View file

@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
tmhh %r8,0x0001 # test problem state bit
jnz 2f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for sie64a
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
jhe 0f
brasl %r14,.Lcleanup_sie
lg %r14,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault

View file

@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=8
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
/* Sanity check */
.if \shift_ist != -1 && \paranoid == 0

View file

@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
if (event->attr.type != bts_pmu.type)
return -ENOENT;
if (x86_add_exclusive(x86_lbr_exclusive_bts))
return -EBUSY;
/*
* BTS leaks kernel addresses even when CPL0 tracing is
* disabled, so disallow intel_bts driver for unprivileged
@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
!capable(CAP_SYS_ADMIN))
return -EACCES;
if (x86_add_exclusive(x86_lbr_exclusive_bts))
return -EBUSY;
ret = x86_reserve_hardware();
if (ret) {
x86_del_exclusive(x86_lbr_exclusive_bts);

View file

@ -110,10 +110,6 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
#endif
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap

View file

@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
{}
};
#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
@ -402,11 +406,48 @@ void amd_flush_garts(void)
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021
msr_set_bit(MSR_AMD64_IC_CFG, 3);
msr_set_bit(MSR_AMD64_IC_CFG, 14);
}
/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
struct pci_dev *F4;
u32 val;
if (boot_cpu_data.x86 != 0x14)
return;
if (!amd_northbridges.num)
return;
F4 = node_to_amd_nb(0)->link;
if (!F4)
return;
if (pci_read_config_dword(F4, 0x164, &val))
return;
if (val & BIT(2))
return;
on_each_cpu(__fix_erratum_688, NULL, 0);
pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}
static __init int init_amd_nbs(void)
{
amd_cache_northbridges();
amd_cache_gart();
fix_erratum_688();
return 0;
}

View file

@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
if (unlikely((idx >= lookup_num_blocks-1))) {
orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n",
idx, lookup_num_blocks, ip);
orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
idx, lookup_num_blocks, (void *)ip);
return NULL;
}
@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
(__start_orc_unwind + stop > __stop_orc_unwind))) {
orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n",
idx, lookup_num_blocks, start, stop, ip);
orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
idx, lookup_num_blocks, start, stop, (void *)ip);
return NULL;
}
@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R10:
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R10 at ip %p\n",
orc_warn("missing regs for base reg R10 at ip %pB\n",
(void *)state->ip);
goto done;
}
@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_R13:
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg R13 at ip %p\n",
orc_warn("missing regs for base reg R13 at ip %pB\n",
(void *)state->ip);
goto done;
}
@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DI:
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DI at ip %p\n",
orc_warn("missing regs for base reg DI at ip %pB\n",
(void *)state->ip);
goto done;
}
@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_REG_DX:
if (!state->regs || !state->full_regs) {
orc_warn("missing regs for base reg DX at ip %p\n",
orc_warn("missing regs for base reg DX at ip %pB\n",
(void *)state->ip);
goto done;
}
@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
orc_warn("unknown SP base reg %d for ip %p\n",
orc_warn("unknown SP base reg %d for ip %pB\n",
orc->sp_reg, (void *)state->ip);
goto done;
}
@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
orc_warn("can't dereference registers at %p for ip %p\n",
orc_warn("can't dereference registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
case ORC_TYPE_REGS_IRET:
if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
orc_warn("can't dereference iret registers at %p for ip %p\n",
orc_warn("can't dereference iret registers at %p for ip %pB\n",
(void *)sp, (void *)orig_ip);
goto done;
}
@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
orc_warn("unknown .orc_unwind entry type %d\n", orc->type);
orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
orc->type, (void *)orig_ip);
break;
}
@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
break;
default:
orc_warn("unknown BP base reg %d for ip %p\n",
orc_warn("unknown BP base reg %d for ip %pB\n",
orc->bp_reg, (void *)orig_ip);
goto done;
}
@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->stack_info.type == prev_type &&
on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
state->sp <= prev_sp) {
orc_warn("stack going in the wrong direction? ip=%p\n",
orc_warn("stack going in the wrong direction? ip=%pB\n",
(void *)orig_ip);
goto done;
}
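The move from %p/%lx to %pB in these warnings is purely a diagnostics improvement: %pB resolves the instruction pointer to a kernel symbol plus offset, so a message that used to end in an opaque value such as ip=ffffffff81234567 would instead end in something like ip=some_function+0x4c/0x80 (symbol name and offsets invented here for illustration only).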

View file

@ -174,15 +174,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
return addr + count <= __pa(high_memory);
}
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
{
phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
return valid_phys_addr_range(addr, count);
}

View file

@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
return 0;
}

View file

@ -14,23 +14,20 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
s32 constraint_ns = -1;
s64 constraint_ns = -1;
if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
if (constraint_ns < 0) {
if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(dev);
constraint_ns *= NSEC_PER_USEC;
}
if (constraint_ns == 0)
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
return 0;
/*
* constraint_ns cannot be negative here, because the device has been
* suspended.
*/
if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
constraint_ns *= NSEC_PER_USEC;
if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
*constraint_ns_p = constraint_ns;
return 0;
@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev)
spin_unlock_irqrestore(&dev->power.lock, flags);
if (constraint_ns < 0)
if (constraint_ns == 0)
return false;
constraint_ns *= NSEC_PER_USEC;
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
constraint_ns = -1;
else
constraint_ns *= NSEC_PER_USEC;
/*
* We can walk the children without any additional locking, because
* they all have been suspended at this point and their
@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
if (constraint_ns > 0) {
constraint_ns -= td->suspend_latency_ns +
td->resume_latency_ns;
if (constraint_ns == 0)
return false;
if (constraint_ns < 0) {
/* The children have no constraints. */
td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
td->cached_suspend_ok = true;
} else {
constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
if (constraint_ns > 0) {
td->effective_constraint_ns = constraint_ns;
td->cached_suspend_ok = true;
} else {
td->effective_constraint_ns = 0;
}
}
td->effective_constraint_ns = constraint_ns;
td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
/* default_suspend_ok() need not be called before us. */
if (constraint_ns < 0) {
if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(pdd->dev);
constraint_ns *= NSEC_PER_USEC;
}
if (constraint_ns == 0)
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
continue;
constraint_ns *= NSEC_PER_USEC;
/*
* constraint_ns cannot be negative here, because the device has
* been suspended.

View file

@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;

View file

@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
else if (__dev_pm_qos_read_value(dev) < 0)
else if (__dev_pm_qos_read_value(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;

View file

@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_store(struct device *dev,
@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
s32 value;
int ret;
if (kstrtos32(buf, 0, &value))
return -EINVAL;
if (!kstrtos32(buf, 0, &value)) {
/*
* Prevent users from writing negative or "no constraint" values
* directly.
*/
if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
return -EINVAL;
if (value < 0)
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
} else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
value = 0;
} else {
return -EINVAL;
}
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
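Summarizing the show/store pair above (this is a restatement of the code, not an additional interface):

	value written            value stored in the QoS request
	"n/a"                    0 (no resume latency can be tolerated)
	"0"                      PM_QOS_RESUME_LATENCY_NO_CONSTRAINT (no limit)
	positive integer N       N microseconds

Negative numbers and the raw no-constraint constant are rejected with -EINVAL, and reads perform the inverse mapping, so a stored 0 is shown as "n/a" and the no-constraint value is shown as 0.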

View file

@ -386,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
return result;
}
/*
* Different settings for sk->sk_sndtimeo can result in different return values
* if there is a signal pending when we enter sendmsg, because reasons?
*/
static inline int was_interrupted(int result)
{
return result == -ERESTARTSYS || result == -EINTR;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
@ -458,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
if (result <= 0) {
if (result == -ERESTARTSYS) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
@ -502,7 +511,7 @@ send_pages:
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result <= 0) {
if (result == -ERESTARTSYS) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
* return BUSY.

View file

@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}
/* resume_latency is 0 means no restriction */
if (resume_latency && resume_latency < latency_req)
if (resume_latency < latency_req &&
resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */

View file

@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_random_get_seed(sys_table);
if (!nokaslr()) {
/* hibernation expects the runtime regions to stay in the same place */
if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a

View file

@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
return -EFAULT;
if (qcaps.capsule_count == ULONG_MAX)
return -EINVAL;
capsules = kcalloc(qcaps.capsule_count + 1,
sizeof(efi_capsule_header_t), GFP_KERNEL);
if (!capsules)
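A short note on why the added ULONG_MAX check matters (reasoning inferred from the kcalloc() call above): the element count passed to kcalloc() is capsule_count + 1, so a count of ULONG_MAX would wrap to zero and yield a minimal allocation that still passes the NULL check, after which copying capsule_count capsule headers into it would write out of bounds. Rejecting the value up front with -EINVAL closes that hole.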

View file

@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
if (r)
return r;
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v6_0_resume(void *handle)
@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_resume(adev);
if (r)
return r;
}
r = amdgpu_uvd_resume(adev);
if (r)
return r;
return uvd_v6_0_hw_init(adev);
}

View file

@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
uint32_t reference_clock, tmp;
struct cgs_display_info info = {0};
struct cgs_mode_info mode_info;
struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
uint32_t ref_clock;
uint32_t refresh_rate = 0;
struct cgs_display_info info = {0};
struct cgs_mode_info mode_info;
struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
cgs_get_active_displays_info(hwmgr->device, &info);
num_active_displays = info.display_count;
@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
data->frame_time_x2 = frame_time_in_us * 2 / 100;
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

View file

@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
return 0;
per_ctx_start[0] = 0x18800001;
per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;

View file

@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}
if (emulate_schedule_in)

View file

@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int instdone_mmio_read(struct intel_vgpu *vgpu,
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
if (HAS_BSD2(dev_priv)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
#define MMIO_RING_D(prefix, d) \
@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x6c)
MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
mmio_read_from_hw, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
mmio_read_from_hw, NULL);
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
mmio_read_from_hw, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
#undef RING_REG
#define RING_REG(base) (base + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#define RING_REG(base) (base + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL | D_KBL);
MMIO_D(0x320f0, D_SKL | D_KBL);
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL_PLUS);
MMIO_D(0x71034, D_SKL_PLUS);
MMIO_D(0x72034, D_SKL_PLUS);

View file

@ -54,9 +54,6 @@
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
#define _REG_VECS_EXCC 0x1A028
#define _REG_VCS2_EXCC 0x1c028
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)

View file

@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
unsigned valid;
};
struct intel_shadow_wa_ctx {

View file

@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
.poll = i915_perf_poll,
.read = i915_perf_read,
.unlocked_ioctl = i915_perf_ioctl,
/* Our ioctls have no arguments, so it's safe to use the same function
* to handle 32-bit compatibility.
*/
.compat_ioctl = i915_perf_ioctl,
};

View file

@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
/* disable touchscreen features */
da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
/* Sample every 1ms */
da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
DA9052_ADCCONT_ADCMODE,
DA9052_ADCCONT_ADCMODE);
err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
"tsiready-irq", da9052_tsi_datardy_irq,
hwmon);

View file

@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
return err;
}
tmp102->ready_time = jiffies;
if (tmp102->config_orig & TMP102_CONF_SD) {
/*
* Mark that we are not ready with data until the first
* conversion is complete
*/
tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
}
/*
* Mark that we are not ready with data until the first
* conversion is complete
*/
tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
tmp102,

View file

@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
/*
* LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
* mistakenly call the .dump() function.
*/
if (index == RDMA_NL_LS) {
if (cb_table[op].doit)
return cb_table[op].doit(skb, nlh, extack);
return -EINVAL;
}
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
index == RDMA_NL_IWCM) {
struct netlink_dump_control c = {
.dump = cb_table[op].dump,
};
return netlink_dump_start(nls, skb, nlh, &c);
if (c.dump)
return netlink_dump_start(nls, skb, nlh, &c);
return -EINVAL;
}
if (cb_table[op].doit)

View file

@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};

View file

@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
unsigned int trackstick_button = BTN_LEFT;
bool button_mapped = false;
int i;
int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
f30->gpioled_key_map = devm_kcalloc(&fn->dev,
f30->gpioled_count,
button_count,
sizeof(f30->gpioled_key_map[0]),
GFP_KERNEL);
if (!f30->gpioled_key_map) {
@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
return -ENOMEM;
}
for (i = 0; i < f30->gpioled_count; i++) {
for (i = 0; i < button_count; i++) {
if (!rmi_f30_is_valid_button(i, f30->ctrl))
continue;

View file

@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
/* Walk this report and pull out the info we need */
while (i < length) {
prefix = report[i];
/* Skip over prefix */
i++;
prefix = report[i++];
/* Determine data size and save the data in the proper variable */
size = PREF_SIZE(prefix);
size = (1U << PREF_SIZE(prefix)) >> 1;
if (i + size > length) {
dev_err(ddev,
"Not enough data (need %d, have %d)\n",
i + size, length);
break;
}
switch (size) {
case 1:
data = report[i];
@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
case 3:
size = 4;
case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}

View file

@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;

View file

@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
if (dev->family != KVASER_LEAF)
goto warn;
break;
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);

View file

@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
char *p;
switch (stat->type) {
case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset;
@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i);
break;
continue;
}
if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p;
else
data[i] = *(u32 *)p;
stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}

View file

@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
netif_carrier_off(netdev);
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH();
msleep(10);
/* Set the carrier off after transmits have been disabled in the
* hardware, to avoid race conditions with e1000_watchdog() (which
* may be running concurrently with us, checking for the carrier
* bit to decide whether it should enable transmits again). Such
* a race condition would result in transmission being disabled
* in the hardware until the next IFF_DOWN+IFF_UP cycle.
*/
netif_carrier_off(netdev);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);

View file

@ -2102,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (unlikely(i40e_rx_is_programming_status(qword))) {
i40e_clean_programming_status(rx_ring, rx_desc, qword);
cleaned_count++;
continue;
}
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@ -2269,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
goto enable_int;
}
if (ITR_IS_DYNAMIC(tx_itr_setting)) {
if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}

View file

@ -5326,7 +5326,7 @@ dma_error:
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
if (i--)
if (i-- == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
}

View file

@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) {
for (;;) {
tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
if (i--)
if (tx_buffer == first)
break;
if (i == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
i--;
}
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
dev_kfree_skb_any(first->skb);
first->skb = NULL;

View file

@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool {
u32 port_map;
};
#define IS_TSO_HEADER(txq_pcpu, addr) \
((addr) >= (txq_pcpu)->tso_headers_dma && \
(addr) < (txq_pcpu)->tso_headers_dma + \
(txq_pcpu)->size * TSO_HEADER_SIZE)
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off];
tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
if (tcam_data != data)
return false;
return true;
@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* place holders only - no ports */
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, false);
mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Set default entries for various types of dsa packets */
@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
struct mvpp2_prs_entry *pe;
int tid;
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return NULL;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
if (tid < 0)
return tid;
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
@ -5623,7 +5629,7 @@ cleanup:
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@ -6212,12 +6218,15 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
size_t buf_sz =
mvpp2_txdesc_size_get(port, desc);
dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
buf_sz, DMA_TO_DEVICE);
if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@ -6490,7 +6499,7 @@ out:
}
/* Finalize TX processing */
if (txq_pcpu->count >= txq->done_pkts_coal)
if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
mvpp2_txq_done(port, txq, txq_pcpu);
/* Set the timer in case not all frags were processed */
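
The IS_TSO_HEADER() macro introduced at the top of this mvpp2 diff is what the TX completion paths above use to skip dma_unmap_single() for buffers carved out of the per-queue coherent TSO-header block. A small userspace sketch of that address-range test (illustrative names and values; a plain integer stands in for dma_addr_t):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HEADER_SIZE 128

struct txq {
        uintptr_t tso_headers_dma;      /* start of the coherent header block */
        int size;                       /* number of headers in the block */
};

static bool is_tso_header(const struct txq *q, uintptr_t addr)
{
        return addr >= q->tso_headers_dma &&
               addr <  q->tso_headers_dma + (uintptr_t)q->size * TSO_HEADER_SIZE;
}

int main(void)
{
        struct txq q = { .tso_headers_dma = 0x1000, .size = 256 };

        /* one address inside the block, one outside */
        printf("%d %d\n", is_tso_header(&q, 0x1000 + 5 * TSO_HEADER_SIZE),
               is_tso_header(&q, 0x9000000));
        return 0;
}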

View file

@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
struct mlx5_core_dev *dev,
struct mlx5_priv *priv)
static void delayed_event_release(struct mlx5_device_context *dev_ctx,
struct mlx5_priv *priv)
{
struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
struct list_head temp;
INIT_LIST_HEAD(&temp);
spin_lock_irq(&priv->ctx_lock);
/* stop delaying events */
priv->is_accum_events = false;
/* fire all accumulated events before new event comes */
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context)
goto out;
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
spin_unlock_irq(&priv->ctx_lock);
list_for_each_entry_safe(de, n, &temp, list) {
list_del(&de->list);
kfree(de);
}
}
static void cleanup_delayed_evets(struct mlx5_priv *priv)
/* accumulating events that can come after mlx5_ib calls to
* ib_register_device, till adding that interface to the events list.
*/
static void delayed_event_start(struct mlx5_priv *priv)
{
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
spin_lock_irq(&priv->ctx_lock);
priv->is_accum_events = false;
list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
list_del(&de->list);
kfree(de);
}
priv->is_accum_events = true;
spin_unlock_irq(&priv->ctx_lock);
}
@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
return;
dev_ctx->intf = intf;
/* accumulating events that can come after mlx5_ib calls to
* ib_register_device, till adding that interface to the events list.
*/
priv->is_accum_events = true;
delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
fire_delayed_event_locked(dev_ctx, dev, priv);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
if (dev_ctx->intf->pfault) {
if (priv->pfault) {
@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
}
#endif
spin_unlock_irq(&priv->ctx_lock);
} else {
kfree(dev_ctx);
/* delete all accumulated events */
cleanup_delayed_evets(priv);
}
delayed_event_release(dev_ctx, priv);
if (!dev_ctx->context)
kfree(dev_ctx);
}
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
return;
goto out;
intf->attach(dev, dev_ctx->context);
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
return;
goto out;
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
out:
delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
if (priv->is_accum_events)
add_delayed_event(priv, dev, event, param);
/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
* still in priv->ctx_list. In this case, only notify the dev_ctx if its
* ADDED or ATTACHED bit are set.
*/
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event)
if (dev_ctx->intf->event &&
(test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
dev_ctx->intf->event(dev, dev_ctx->context, event, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
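
The dev.c change above brackets interface add/attach with delayed_event_start()/delayed_event_release(): events that arrive in between are queued on waiting_events_list and handed to the new context once it exists. A rough userspace sketch of the same accumulate-then-splice idea (pthread mutex in place of the spinlock; for simplicity this version replays outside the lock, and its push-front list reverses replay order, unlike the driver's tail-insert list):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ev { int code; struct ev *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct ev *waiting;              /* events queued while accumulating */
static bool accumulating;

static void event_start(void)
{
        pthread_mutex_lock(&lock);
        accumulating = true;
        pthread_mutex_unlock(&lock);
}

static void post_event(int code)
{
        pthread_mutex_lock(&lock);
        if (!accumulating) {
                pthread_mutex_unlock(&lock);
                printf("deliver %d immediately\n", code);
                return;
        }
        struct ev *e = malloc(sizeof(*e));
        if (e) {
                e->code = code;
                e->next = waiting;      /* push front; the driver appends at tail */
                waiting = e;
        }
        pthread_mutex_unlock(&lock);
}

static void event_release(void)
{
        struct ev *list;

        pthread_mutex_lock(&lock);
        accumulating = false;
        list = waiting;                 /* splice the pending list out */
        waiting = NULL;
        pthread_mutex_unlock(&lock);

        while (list) {                  /* replay, then free each entry */
                struct ev *e = list;

                list = e->next;
                printf("deliver deferred %d\n", e->code);
                free(e);
        }
}

int main(void)
{
        event_start();
        post_event(1);
        post_event(2);
        event_release();
        post_event(3);
        return 0;
}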

View file

@ -41,6 +41,11 @@
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_LOWEST_PRIO_GROUP = 0,
};
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
bool is_tc_group_6_exist = false;
bool is_zero_bw_ets_tc = false;
int err = 0;
int i;
@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
}
for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
if (err)
return err;
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
is_zero_bw_ets_tc = true;
if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
is_tc_group_6_exist = true;
}
/* Report 0% ets tc if exits*/
if (is_zero_bw_ets_tc) {
for (i = 0; i < ets->ets_cap; i++)
if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
ets->tc_tx_bw[i] = 0;
}
/* Update tc_tsa based on fw setting*/
for (i = 0; i < ets->ets_cap; i++) {
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
!is_tc_group_6_exist)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_ETS_TC_GROUP_NUM = 0,
};
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
bool ets_zero_bw = false;
int strict_group;
int i;
for (i = 0; i <= max_tc; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
any_tc_mapped_to_ets = true;
if (!ets->tc_tx_bw[i])
ets_zero_bw = true;
}
}
strict_group = any_tc_mapped_to_ets ? 1 : 0;
/* strict group has higher priority than ets group */
strict_group = MLX5E_LOWEST_PRIO_GROUP;
if (any_tc_mapped_to_ets)
strict_group++;
if (ets_zero_bw)
strict_group++;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
if (ets->tc_tx_bw[i] && ets_zero_bw)
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
break;
}
}
@ -146,8 +183,22 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
int bw_for_ets_zero_bw_tc = 0;
int last_ets_zero_bw_tc = -1;
int num_ets_zero_bw = 0;
int i;
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
!ets->tc_tx_bw[i]) {
num_ets_zero_bw++;
last_ets_zero_bw_tc = i;
}
}
if (num_ets_zero_bw)
bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i];
tc_tx_bw[i] = ets->tc_tx_bw[i] ?
ets->tc_tx_bw[i] :
bw_for_ets_zero_bw_tc;
break;
}
}
/* Make sure the total bw for ets zero bw group is 100% */
if (last_ets_zero_bw_tc != -1)
tc_tx_bw[last_ets_zero_bw_tc] +=
MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
/* If there are ETS BW 0,
* Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
* Set group #0 to all the ETS BW 0 tcs and
* equally splits the 100% BW between them
* Report both group #0 and #1 as ETS type.
* All the tcs in group #0 will be reported with 0% BW.
*/
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
return err;
}
@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
if (!ets->tc_tx_bw[i]) {
netdev_err(netdev,
"Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
bw_sum += ets->tc_tx_bw[i];
}
}
if (bw_sum != 0 && bw_sum != 100) {
netdev_err(netdev,
@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
return;
}
if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
*bw_pct = 0;
mlx5e_dcbnl_ieee_getets(netdev, &ets);
*bw_pct = ets.tc_tx_bw[pgid];
}
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
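
Behind the en_dcbnl.c changes above is a small piece of arithmetic: TCs whose ETS bandwidth is configured as 0% are placed in their own group, and the hardware allocation for that group is split evenly among them, with the division remainder folded into the last such TC so the group still sums to exactly MLX5E_MAX_BW_ALLOC. A tiny standalone illustration (assuming MLX5E_MAX_BW_ALLOC is 100):

#include <stdio.h>

#define MAX_BW_ALLOC 100

int main(void)
{
        int zero_bw_tcs = 3;                            /* e.g. three ETS TCs at 0% */
        int per_tc = MAX_BW_ALLOC / zero_bw_tcs;        /* 33 each */
        int last = per_tc + MAX_BW_ALLOC % zero_bw_tcs; /* 34 for the last one */

        printf("per-tc=%d last=%d sum=%d\n",
               per_tc, last, per_tc * (zero_bw_tcs - 1) + last);
        return 0;
}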

View file

@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex;
};
enum {
@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5_flow_handle *rule;
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5_flow_handle *rule = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
int err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
out_dev = __dev_get_by_index(dev_net(priv->netdev),
attr->parse_attr->mirred_ifindex);
err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
out_dev, &encap_dev, flow);
if (err) {
rule = ERR_PTR(err);
if (err != -EAGAIN)
goto err_attach_encap;
}
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err) {
rule = ERR_PTR(err);
@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
}
rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
if (IS_ERR(rule))
goto err_add_rule;
/* we get here if (1) there's no error (rule being null) or when
* (2) there's an encap action and we're on -EAGAIN (no valid neigh)
*/
if (rule != ERR_PTR(-EAGAIN)) {
rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
if (IS_ERR(rule))
goto err_add_rule;
}
return rule;
err_add_rule:
@ -361,6 +391,7 @@ err_mod_hdr:
err_add_vlan:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
err_attach_encap:
return rule;
}
@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_tc_flow *flow;
int err;
@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, &e->flows, encap) {
flow->esw_attr->encap_id = e->encap_id;
flow->rule = mlx5e_tc_add_fdb_flow(priv,
flow->esw_attr->parse_attr,
flow);
esw_attr = flow->esw_attr;
esw_attr->encap_id = e->encap_id;
flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
counter = mlx5_flow_rule_counter(flow->rule);
mlx5_del_flow_rules(flow->rule);
mlx5_fc_destroy(priv->mdev, counter);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
}
}
@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev, *encap_dev = NULL;
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
err = mlx5e_attach_encap(priv, info,
out_dev, &encap_dev, flow);
if (err && err != -EAGAIN)
return err;
parse_attr->mirred_ifindex = ifindex;
parse_attr->tun_info = *info;
attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
attr->parse_attr = parse_attr;
/* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
goto err_handle_encap_flow;
goto err_free;
flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
goto err_free;
if (err != -EAGAIN)
goto err_free;
}
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
if (err != -EAGAIN)
flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
err_handle_encap_flow:
if (err == -EAGAIN) {
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
mlx5e_tc_del_flow(priv, flow);
else
return 0;
}
err_free:
kvfree(parse_attr);
kfree(flow);

View file

@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
unsigned long flags;
spin_lock(&health->wq_lock);
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock(&health->wq_lock);
spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}

View file

@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
{
u32 out[MLX5_ST_SZ_DW(qetc_reg)];
void *ets_tcn_conf;
int err;
err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
if (err)
return err;
ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
tc_configuration[tc]);
*tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
group);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};

View file

@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
*/
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
if (!output->port)

View file

@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
if (of_property_read_u32(np, "read,read-requests",
if (of_property_read_u32(np, "snps,read-requests",
&plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property

View file

@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_to_use = 1;
plat->tx_queues_to_use = 1;
/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
* to always set this, otherwise Queue will be classified as AVB
* (because MTL_QUEUE_AVB = 0).
*/
plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
if (!rx_node)
return;

View file

@ -197,8 +197,8 @@ static int ipvtap_init(void)
{
int err;
err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
THIS_MODULE);
if (err)
goto out1;

View file

@ -204,8 +204,8 @@ static int macvtap_init(void)
{
int err;
err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
THIS_MODULE);
if (err)
goto out1;

View file

@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
sk_free(&q->sk);
goto err;
}
RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
err = -ENOMEM;
if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
goto err_array;
err = tap_set_queue(tap, file, q);
if (err)
goto err_queue;
if (err) {
/* tap_sock_destruct() will take care of freeing skb_array */
goto err_put;
}
dev_put(tap->dev);
rtnl_unlock();
return err;
err_queue:
skb_array_cleanup(&q->skb_array);
err_array:
err_put:
sock_put(&q->sk);
err:
if (tap)
@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name)
return 0;
}
int tap_create_cdev(struct cdev *tap_cdev,
dev_t *tap_major, const char *device_name)
int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
const char *device_name, struct module *module)
{
int err;
@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
goto out1;
cdev_init(tap_cdev, &tap_fops);
tap_cdev->owner = module;
err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
if (err)
goto out2;

View file

@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
@ -2028,7 +2029,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
err = dev_get_valid_name(net, dev, name);
if (err)
if (err < 0)
goto err_free_dev;
dev_net_set(dev, net);

View file

@ -561,6 +561,7 @@ static const struct driver_info wwan_info = {
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
#define TPLINK_VENDOR_ID 0x2357
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@ -806,6 +807,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
@ -856,6 +864,12 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t)&wwan_info,
}, {
/* Huawei ME906 and ME909 */
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_info,
}, {
/* ZTE modules */
USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,

View file

@ -615,6 +615,7 @@ enum rtl8152_flags {
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@ -5318,6 +5319,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
{}
};

View file

@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_abort_aen_ops(ctrl);
/* wait for all io that had to be aborted */
spin_lock_irqsave(&ctrl->lock, flags);
spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
ctrl->flags &= ~FCCTRL_TERMIO;
spin_unlock_irqrestore(&ctrl->lock, flags);
spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
int ret, idx;
int ret, idx, retry;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
ret = nvme_fc_create_association(ctrl);
/*
* It's possible that transactions used to create the association
* may fail. Examples: CreateAssociation LS or CreateIOConnection
* LS gets dropped/corrupted/fails; or a frame gets dropped or a
* command times out for one of the actions to init the controller
* (Connect, Get/Set_Property, Set_Features, etc). Many of these
* transport errors (frame drop, LS failure) inherently must kill
* the association. The transport is coded so that any command used
* to create the association (prior to a LIVE state transition
* while NEW or RECONNECTING) will fail if it completes in error or
* times out.
*
* As such: as the connect request was mostly likely due to a
* udev event that discovered the remote port, meaning there is
* not an admin or script there to restart if the connect
* request fails, retry the initial connection creation up to
* three times before giving up and declaring failure.
*/
for (retry = 0; retry < 3; retry++) {
ret = nvme_fc_create_association(ctrl);
if (!ret)
break;
}
if (ret) {
/* couldn't schedule retry - fail out */
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
ctrl->ctrl.opts = NULL;
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);

View file

@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
return;
if (nvme_rdma_queue_idx(queue) == 0) {
nvme_rdma_free_qe(queue->device->dev,
&queue->ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
}
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@ -739,8 +745,6 @@ out:
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
if (IS_ERR(ctrl->ctrl.admin_tagset))
if (IS_ERR(ctrl->ctrl.admin_tagset)) {
error = PTR_ERR(ctrl->ctrl.admin_tagset);
goto out_free_queue;
}
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
if (new) {
ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
if (IS_ERR(ctrl->ctrl.tagset))
if (IS_ERR(ctrl->ctrl.tagset)) {
ret = PTR_ERR(ctrl->ctrl.tagset);
goto out_free_io_queues;
}
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {

View file

@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
u32 old_sqhd, new_sqhd;
u16 sqhd;
if (status)
nvmet_set_status(req, status);
if (req->sq->size)
req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
if (req->sq->size) {
do {
old_sqhd = req->sq->sqhd;
new_sqhd = (old_sqhd + 1) % req->sq->size;
} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
old_sqhd);
}
sqhd = req->sq->sqhd & 0x0000FFFF;
req->rsp->sq_head = cpu_to_le16(sqhd);
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
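
The nvmet change above turns the SQ head advance into a compare-and-swap retry loop, so concurrent completions cannot lose an increment the way a plain read-modify-write could. A userspace sketch of the same loop, using C11 atomics in place of the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int sqhd;

static unsigned int advance_sqhd(unsigned int qsize)
{
        unsigned int old = atomic_load(&sqhd);
        unsigned int next;

        do {
                next = (old + 1) % qsize;       /* modular increment */
                /* on failure, 'old' is reloaded and the loop retries */
        } while (!atomic_compare_exchange_weak(&sqhd, &old, next));
        return next;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("sqhd=%u\n", advance_sqhd(4));
        return 0;
}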

View file

@ -74,7 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
u16 sqhd;
u32 sqhd;
struct completion free_done;
struct completion confirm_done;
};

View file

@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
continue;
irq = irq_find_mapping(gc->irqdomain, irqnr + i);
generic_handle_irq(irq);
/* Clear interrupt */
/* Clear interrupt.
* We must read the pin register again, in case the
* value was changed while executing
* generic_handle_irq() above.
*/
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
regval = readl(regs + i);
writel(regval, regs + i);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
ret = IRQ_HANDLED;
}
}

View file

@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
ret = mcp_read(mcp, MCP_GPIO, &status);
if (ret < 0)
status = 0;
else
else {
mcp->cached_gpio = status;
status = !!(status & (1 << offset));
mcp->cached_gpio = status;
}
mutex_unlock(&mcp->lock);
return status;

View file

@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/spinlock.h>
#include <asm/intel_pmc_ipc.h>
@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
/* gcr */
void __iomem *gcr_mem_base;
bool has_gcr_regs;
spinlock_t gcr_lock;
/* punit */
struct platform_device *punit_dev;
@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
{
int ret;
mutex_lock(&ipclock);
spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
mutex_unlock(&ipclock);
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
*data = readl(ipcdev.gcr_mem_base + offset);
mutex_unlock(&ipclock);
spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
{
int ret;
mutex_lock(&ipclock);
spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
mutex_unlock(&ipclock);
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
writel(data, ipcdev.gcr_mem_base + offset);
mutex_unlock(&ipclock);
spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
u32 new_val;
int ret = 0;
mutex_lock(&ipclock);
spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0)
@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
}
gcr_ipc_unlock:
mutex_unlock(&ipclock);
spin_unlock(&ipcdev.gcr_lock);
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
resource_size_t pci_resource;
struct intel_pmc_ipc_dev *pmc = &ipcdev;
int ret;
int len;
ipcdev.dev = &pci_dev_get(pdev)->dev;
ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
ret = pci_enable_device(pdev);
if (ret)
return ret;
ret = pci_request_regions(pdev, "intel_pmc_ipc");
if (ret)
return ret;
pci_resource = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
if (!pci_resource || !len) {
dev_err(&pdev->dev, "Failed to get resource\n");
return -ENOMEM;
}
init_completion(&ipcdev.cmd_complete);
if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
dev_err(&pdev->dev, "Failed to request irq\n");
/* Only one PMC is supported */
if (pmc->dev)
return -EBUSY;
pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
spin_lock_init(&ipcdev.gcr_lock);
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (ret)
return ret;
init_completion(&pmc->cmd_complete);
pmc->ipc_base = pcim_iomap_table(pdev)[0];
ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
pmc);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq\n");
return ret;
}
ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
if (!ipcdev.ipc_base) {
dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
free_irq(pdev->irq, &ipcdev);
ret = -ENOMEM;
}
pmc->dev = &pdev->dev;
return ret;
}
pci_set_drvdata(pdev, pmc);
static void ipc_pci_remove(struct pci_dev *pdev)
{
free_irq(pdev->irq, &ipcdev);
pci_release_regions(pdev);
pci_dev_put(pdev);
iounmap(ipcdev.ipc_base);
ipcdev.dev = NULL;
return 0;
}
static const struct pci_device_id ipc_pci_ids[] = {
@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
.name = "intel_pmc_ipc",
.id_table = ipc_pci_ids,
.probe = ipc_pci_probe,
.remove = ipc_pci_remove,
};
static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
res->end = res->start + size - 1;
addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
if (!request_mem_region(res->start, size, pdev->name)) {
dev_err(&pdev->dev, "Failed to request ipc resource\n");
return -EBUSY;
}
addr = ioremap_nocache(res->start, size);
if (!addr) {
dev_err(&pdev->dev, "I/O memory remapping failed\n");
release_mem_region(res->start, size);
return -ENOMEM;
}
ipcdev.ipc_base = addr;
ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
static int ipc_plat_probe(struct platform_device *pdev)
{
struct resource *res;
int ret;
ipcdev.dev = &pdev->dev;
ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
init_completion(&ipcdev.cmd_complete);
spin_lock_init(&ipcdev.gcr_lock);
ipcdev.irq = platform_get_irq(pdev, 0);
if (ipcdev.irq < 0) {
@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
ret = ipc_create_pmc_devices();
if (ret) {
dev_err(&pdev->dev, "Failed to create pmc devices\n");
goto err_device;
return ret;
}
if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND,
"intel_pmc_ipc", &ipcdev)) {
if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
"intel_pmc_ipc", &ipcdev)) {
dev_err(&pdev->dev, "Failed to request irq\n");
ret = -EBUSY;
goto err_irq;
@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
return 0;
err_sys:
free_irq(ipcdev.irq, &ipcdev);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
err_device:
iounmap(ipcdev.ipc_base);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
if (res) {
release_mem_region(res->start,
PLAT_RESOURCE_IPC_SIZE +
PLAT_RESOURCE_GCR_SIZE);
}
return ret;
}
static int ipc_plat_remove(struct platform_device *pdev)
{
struct resource *res;
sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
free_irq(ipcdev.irq, &ipcdev);
devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
iounmap(ipcdev.ipc_base);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
if (res) {
release_mem_region(res->start,
PLAT_RESOURCE_IPC_SIZE +
PLAT_RESOURCE_GCR_SIZE);
}
ipcdev.dev = NULL;
return 0;
}

View file

@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
case AXP803_DCDC3:
return !!(reg & BIT(6));
case AXP803_DCDC6:
return !!(reg & BIT(7));
return !!(reg & BIT(5));
}
break;

View file

@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
};
#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
[RN5T618_##rid] = { \
{ \
.name = #rid, \
.of_match = of_match_ptr(#rid), \
.regulators_node = of_match_ptr("regulators"), \

View file

@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->next_port_scan = jiffies;
adapter->erp_action.adapter = adapter;
if (zfcp_qdio_setup(adapter))
goto failed;
@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
port->erp_action.adapter = adapter;
port->erp_action.port = port;
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;

View file

@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
erp_action->port = port;
erp_action->sdev = sdev;
WARN_ON_ONCE(erp_action->port != port);
WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
erp_action->port = port;
WARN_ON_ONCE(erp_action->port != port);
WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action));
WARN_ON_ONCE(erp_action->port != NULL);
WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}
erp_action->adapter = adapter;
WARN_ON_ONCE(erp_action->adapter != adapter);
memset(&erp_action->list, 0, sizeof(erp_action->list));
memset(&erp_action->timer, 0, sizeof(erp_action->timer));
erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;

View file

@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
zfcp_sdev->erp_action.adapter = adapter;
zfcp_sdev->erp_action.sdev = sdev;
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
zfcp_sdev->erp_action.port = port;
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);

View file

@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
return -ENOMEM;
aac_fib_init(fibctx);
mutex_lock(&dev->ioctl_mutex);
dev->adapter_shutdown = 1;
mutex_unlock(&dev->ioctl_mutex);
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
dev->adapter_shutdown = 1;
mutex_unlock(&dev->ioctl_mutex);
}
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);

View file

@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
{
int i;
mutex_lock(&aac->ioctl_mutex);
aac->adapter_shutdown = 1;
aac_send_shutdown(aac);
mutex_unlock(&aac->ioctl_mutex);
if (aac->aif_thread) {
int i;
@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
}
kthread_stop(aac->thread);
}
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
if (aac_is_src(aac)) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {

View file

@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
memset(id_ctlr, 0, sizeof(*id_ctlr));
rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
if (!rc)
if (id_ctlr->configured_logical_drive_count < 256)
if (id_ctlr->configured_logical_drive_count < 255)
*nlocals = id_ctlr->configured_logical_drive_count;
else
*nlocals = le16_to_cpu(
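
The hpsa tweak above matters because the configured logical drive count is an 8-bit field: comparing it against 256 is always true, which left the 16-bit extended-count fallback below it unreachable; with the fix, 255 is the point at which the wider field is consulted. A two-line illustration (most compilers warn about the first comparison):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t count = 255;            /* largest value an 8-bit count can hold */

        printf("count < 256: %d\n", count < 256);       /* always 1 */
        printf("count < 255: %d\n", count < 255);       /* 0 once saturated */
        return 0;
}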

View file

@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@ -3175,8 +3177,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
if (ha->mqenable) {
bool mq = false;
bool startit = false;

View file

@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
ret = scsi_setup_cmnd(sdev, req);
out:
if (ret != BLKPREP_OK)
cmd->flags &= ~SCMD_INITIALIZED;
return scsi_prep_return(q, req, ret);
}
@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scatterlist *sg;
int ret;
scsi_init_command(sdev, cmd);
@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
blk_mq_start_request(req);
ret = scsi_setup_cmnd(sdev, req);
if (ret != BLK_STS_OK)
cmd->flags &= ~SCMD_INITIALIZED;
return ret;
return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)

View file

@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if (val > SG_MAX_QUEUE)
if (val >= SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
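
The sg fix above is a plain off-by-one: val indexes a table with SG_MAX_QUEUE entries, so the walk has to stop as soon as val reaches SG_MAX_QUEUE. In miniature (illustrative int array instead of the real sg_req_info_t table):

#include <stdio.h>

#define SG_MAX_QUEUE 16

int main(void)
{
        int rinfo[SG_MAX_QUEUE] = { 0 };

        for (int val = 0; ; val++) {
                if (val >= SG_MAX_QUEUE)        /* '>' here would write rinfo[16] */
                        break;
                rinfo[val] = val + 1;
        }
        printf("last=%d\n", rinfo[SG_MAX_QUEUE - 1]);
        return 0;
}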

Some files were not shown because too many files have changed in this diff.