diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml index b962fa6d649c3..d79d36ac0c44a 100644 --- a/Documentation/devicetree/bindings/arm/tegra.yaml +++ b/Documentation/devicetree/bindings/arm/tegra.yaml @@ -54,7 +54,7 @@ properties: - const: toradex,apalis_t30 - const: nvidia,tegra30 - items: - - const: toradex,apalis_t30-eval-v1.1 + - const: toradex,apalis_t30-v1.1-eval - const: toradex,apalis_t30-eval - const: toradex,apalis_t30-v1.1 - const: toradex,apalis_t30 diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt index fbb59c9ddda65..78044c340e208 100644 --- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt +++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt @@ -9,7 +9,7 @@ function block. All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node. For a description of the MMSYS_CONFIG binding, see -Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt. +Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml. DISP function blocks ==================== diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml index 7f2578d48e3f0..9eb4bb529ad5c 100644 --- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml +++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml @@ -19,7 +19,9 @@ properties: - const: allwinner,sun8i-v3s-emac - const: allwinner,sun50i-a64-emac - items: - - const: allwinner,sun50i-h6-emac + - enum: + - allwinner,sun20i-d1-emac + - allwinner,sun50i-h6-emac - const: allwinner,sun50i-a64-emac reg: diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml new file mode 100644 index 0000000000000..b9ca8ef4f2be5 --- /dev/null +++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung SoC series UFS host controller Device Tree Bindings + +maintainers: + - Alim Akhtar <alim.akhtar@samsung.com> + +description: | + Each Samsung UFS host controller instance should have its own node.
+ This binding defines the Samsung-specific bindings other than what is used + in the common ufshcd bindings + [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt + +properties: + + compatible: + enum: + - samsung,exynos7-ufs + + reg: + items: + - description: HCI register + - description: vendor specific register + - description: unipro register + - description: UFS protector register + + reg-names: + items: + - const: hci + - const: vs_hci + - const: unipro + - const: ufsp + + clocks: + items: + - description: ufs link core clock + - description: unipro main clock + + clock-names: + items: + - const: core_clk + - const: sclk_unipro_main + + interrupts: + maxItems: 1 + + phys: + maxItems: 1 + + phy-names: + const: ufs-phy + +required: + - compatible + - reg + - interrupts + - phys + - phy-names + - clocks + - clock-names + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/clock/exynos7-clk.h> + + ufs: ufs@15570000 { + compatible = "samsung,exynos7-ufs"; + reg = <0x15570000 0x100>, + <0x15570100 0x100>, + <0x15571000 0x200>, + <0x15572000 0x300>; + reg-names = "hci", "vs_hci", "unipro", "ufsp"; + interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clock_fsys1 ACLK_UFS20_LINK>, + <&clock_fsys1 SCLK_UFSUNIPRO20_USER>; + clock-names = "core_clk", "sclk_unipro_main"; + pinctrl-names = "default"; + pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>; + phys = <&ufs_phy>; + phy-names = "ufs-phy"; + }; +... diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst index 564caeebe2b26..29b1bae0cf000 100644 --- a/Documentation/networking/dsa/sja1105.rst +++ b/Documentation/networking/dsa/sja1105.rst @@ -296,7 +296,7 @@ not available. Device Tree bindings and board design ===================================== -This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt`` +This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml`` and aims to showcase some potential switch caveats.
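As an aside on how such a schema is consumed: below is a minimal sketch of a platform driver probe looking up the named resources the new UFS binding declares. This is not the actual exynos-ufs driver; the function and variable names are illustrative assumptions, and only the lookup helpers (devm_platform_ioremap_resource_byname(), devm_clk_get(), devm_phy_get()) are real kernel APIs.

#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int example_ufs_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *hci;
	struct clk *core_clk;
	struct phy *phy;

	/* each "reg-names" entry selects one MMIO window by name */
	hci = devm_platform_ioremap_resource_byname(pdev, "hci");
	if (IS_ERR(hci))
		return PTR_ERR(hci);

	/* "clock-names" and "phy-names" entries are matched the same way */
	core_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(core_clk))
		return PTR_ERR(core_clk);

	phy = devm_phy_get(dev, "ufs-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	return clk_prepare_enable(core_clk);
}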
RMII PHY role and out-of-band signaling diff --git a/MAINTAINERS b/MAINTAINERS index eeb4c70b3d5b5..ca6d6fde85cf8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14342,7 +14342,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml F: drivers/pci/controller/pci-ixp4xx.c PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD) -M: Jonathan Derrick +M: Nirmal Patel +R: Jonathan Derrick L: linux-pci@vger.kernel.org S: Supported F: drivers/pci/controller/vmd.c diff --git a/Makefile b/Makefile index 34a0afc3a8eb1..5e7c1d8544412 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Opossums on Parade # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 02e5b673bdade..4e87783c90ad7 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -20,7 +20,7 @@ config ALPHA select NEED_SG_DMA_LENGTH select VIRT_TO_BUS select GENERIC_IRQ_PROBE - select GENERIC_PCI_IOMAP if PCI + select GENERIC_PCI_IOMAP select AUTO_IRQ_AFFINITY if SMP select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION @@ -199,7 +199,6 @@ config ALPHA_EIGER config ALPHA_JENSEN bool "Jensen" - depends on BROKEN select HAVE_EISA help DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h index b34cc1f06720f..c8ae46fc2e745 100644 --- a/arch/alpha/include/asm/asm-prototypes.h +++ b/arch/alpha/include/asm/asm-prototypes.h @@ -16,3 +16,4 @@ extern void __divlu(void); extern void __remlu(void); extern void __divqu(void); extern void __remqu(void); +extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long); diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 0fab5ac907758..c9cb554fbe54c 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae) * Change virtual addresses to physical addresses and vv. */ #ifdef USE_48_BIT_KSEG -static inline unsigned long virt_to_phys(void *address) +static inline unsigned long virt_to_phys(volatile void *address) { return (unsigned long)address - IDENT_ADDR; } @@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address) return (void *) (address + IDENT_ADDR); } #else -static inline unsigned long virt_to_phys(void *address) +static inline unsigned long virt_to_phys(volatile void *address) { unsigned long phys = (unsigned long)address; @@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address) extern unsigned long __direct_map_base; extern unsigned long __direct_map_size; -static inline unsigned long __deprecated virt_to_bus(void *address) +static inline unsigned long __deprecated virt_to_bus(volatile void *address) { unsigned long phys = virt_to_phys(address); unsigned long bus = phys + __direct_map_base; diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h index 916895155a889..1c4131453db2c 100644 --- a/arch/alpha/include/asm/jensen.h +++ b/arch/alpha/include/asm/jensen.h @@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr) * convinced that I need one of the newer machines. 
*/ -static inline unsigned int jensen_local_inb(unsigned long addr) +__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr) { return 0xff & *(vuip)((addr << 9) + EISA_VL82C106); } -static inline void jensen_local_outb(u8 b, unsigned long addr) +__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr) { *(vuip)((addr << 9) + EISA_VL82C106) = b; mb(); } -static inline unsigned int jensen_bus_inb(unsigned long addr) +__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr) { long result; @@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr) return __kernel_extbl(result, addr & 3); } -static inline void jensen_bus_outb(u8 b, unsigned long addr) +__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr) { jensen_set_hae(0); *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101; diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h new file mode 100644 index 0000000000000..262aab99e391c --- /dev/null +++ b/arch/alpha/include/asm/setup.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ALPHA_SETUP_H +#define __ALPHA_SETUP_H + +#include <uapi/asm/setup.h> + +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16*1024) + +#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS +#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ +#else +#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ +#endif + +#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) +#define SWAPPER_PGD KERNEL_START +#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) +#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) +#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) +#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) + +#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) + +/* + * This is setup by the secondary bootstrap loader. Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. + */ +#define PARAM ZERO_PGE +#define COMMAND_LINE ((char *)(absolute_pointer(PARAM + 0x0000))) +#define INITRD_START (*(unsigned long *) (PARAM+0x100)) +#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) + +#endif diff --git a/arch/alpha/include/uapi/asm/setup.h b/arch/alpha/include/uapi/asm/setup.h index 13b7ee465b0e5..f881ea5947cbc 100644 --- a/arch/alpha/include/uapi/asm/setup.h +++ b/arch/alpha/include/uapi/asm/setup.h @@ -1,43 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __ALPHA_SETUP_H -#define __ALPHA_SETUP_H +#ifndef _UAPI__ALPHA_SETUP_H +#define _UAPI__ALPHA_SETUP_H #define COMMAND_LINE_SIZE 256 -/* - * We leave one page for the initial stack page, and one page for - * the initial process structure. Also, the console eats 3 MB for - * the initial bootloader (one of which we can reclaim later). - */ -#define BOOT_PCB 0x20000000 -#define BOOT_ADDR 0x20000000 -/* Remove when official MILO sources have ELF support: */ -#define BOOT_SIZE (16*1024) - -#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS -#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this.
*/ -#else -#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ -#endif - -#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) -#define SWAPPER_PGD KERNEL_START -#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) -#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) -#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) -#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) - -#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) - -/* - * This is setup by the secondary bootstrap loader. Because - * the zero page is zeroed out as soon as the vm system is - * initialized, we need to copy things out into a more permanent - * place. - */ -#define PARAM ZERO_PGE -#define COMMAND_LINE ((char*)(PARAM + 0x0000)) -#define INITRD_START (*(unsigned long *) (PARAM+0x100)) -#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) - -#endif +#endif /* _UAPI__ALPHA_SETUP_H */ diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index e5d870ff225f4..5c9c884281244 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -7,6 +7,11 @@ * * Code supporting the Jensen. */ +#define __EXTERN_INLINE +#include <asm/io.h> +#include <asm/jensen.h> +#undef __EXTERN_INLINE + #include #include #include @@ -17,11 +22,6 @@ #include -#define __EXTERN_INLINE inline -#include <asm/io.h> -#include <asm/jensen.h> -#undef __EXTERN_INLINE - #include #include #include diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index 854d5e79979e4..1cc74f7b50efb 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6- ev67-$(CONFIG_ALPHA_EV67) := ev67- lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ + udiv-qrnnd.o \ udelay.o \ $(ev6-y)memset.o \ $(ev6-y)memcpy.o \ diff --git a/arch/alpha/math-emu/qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S similarity index 98% rename from arch/alpha/math-emu/qrnnd.S rename to arch/alpha/lib/udiv-qrnnd.S index d6373ec1bff9e..b887aa5428e55 100644 --- a/arch/alpha/math-emu/qrnnd.S +++ b/arch/alpha/lib/udiv-qrnnd.S @@ -25,6 +25,7 @@ # along with GCC; see the file COPYING. If not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, # MA 02111-1307, USA.
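Before the assembly resumes, it is worth stating the contract of the routine being moved: __udiv_qrnnd(&r, n1, n0, d) divides the 128-bit value n1:n0 by d, returning the quotient and storing the remainder through the pointer; as in GMP's longlong.h, from which it descends, the caller must ensure n1 < d so the quotient fits in one word. A portable C reference of that contract, offered as an illustrative sketch rather than the kernel's implementation:

/* Reference semantics for the Alpha __udiv_qrnnd helper (sketch only). */
unsigned long udiv_qrnnd_ref(unsigned long *r, unsigned long n1,
			     unsigned long n0, unsigned long d)
{
	/* n1:n0 form a 128-bit numerator; requires n1 < d */
	unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;

	*r = (unsigned long)(n % d);
	return (unsigned long)(n / d);
}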
+#include <asm/export.h> .set noreorder .set noat @@ -161,3 +162,4 @@ $Odd: ret $31,($26),1 .end __udiv_qrnnd +EXPORT_SYMBOL(__udiv_qrnnd) diff --git a/arch/alpha/math-emu/Makefile b/arch/alpha/math-emu/Makefile index 6eda0973e1832..3206402a87929 100644 --- a/arch/alpha/math-emu/Makefile +++ b/arch/alpha/math-emu/Makefile @@ -7,4 +7,4 @@ ccflags-y := -w obj-$(CONFIG_MATHEMU) += math-emu.o -math-emu-objs := math.o qrnnd.o +math-emu-objs := math.o diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c index f7cef66af88de..4212258f3cfdc 100644 --- a/arch/alpha/math-emu/math.c +++ b/arch/alpha/math-emu/math.c @@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask) egress: return si_code; } - -EXPORT_SYMBOL(__udiv_qrnnd); diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5a294f20e9dec..ff4962750b3d0 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task) void sve_alloc(struct task_struct *task) { if (task->thread.sve_state) { - memset(task->thread.sve_state, 0, sve_state_size(current)); + memset(task->thread.sve_state, 0, sve_state_size(task)); return; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 19100fe8f7e49..40adb8cdbf5af 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -58,7 +57,7 @@ #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include -unsigned long __stack_chk_guard __read_mostly; +unsigned long __stack_chk_guard __ro_after_init; EXPORT_SYMBOL(__stack_chk_guard); #endif diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index 911826ea83ce1..80eb2396d01eb 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -17,21 +17,21 @@ * two accesses to memory, which may be undesirable for some devices.
*/ #define in_8(addr) \ - ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; }) + ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; }) #define in_be16(addr) \ - ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; }) + ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; }) #define in_be32(addr) \ - ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; }) + ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; }) #define in_le16(addr) \ - ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; }) + ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; }) #define in_le32(addr) \ - ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; }) + ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; }) -#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b)) -#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w)) -#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) -#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w)) -#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) +#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b)) +#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w)) +#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l)) +#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w)) +#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l)) #define raw_inb in_8 #define raw_inw in_be16 diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index e1e90c49a4962..dfd6202fd403e 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -171,7 +171,6 @@ static int bcd2int (unsigned char b) int mvme147_hwclk(int op, struct rtc_time *t) { -#warning check me! if (!op) { m147_rtc->ctrl = RTC_READ; t->tm_year = bcd2int (m147_rtc->bcd_year); @@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t) m147_rtc->ctrl = 0; if (t->tm_year < 70) t->tm_year += 100; + } else { + /* FIXME Setting the time is not yet supported */ + return -EOPNOTSUPP; } return 0; } diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index b59593c7cfb9d..b4422c2dfbbf4 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -436,7 +436,6 @@ int bcd2int (unsigned char b) int mvme16x_hwclk(int op, struct rtc_time *t) { -#warning check me! 
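Both RTC fixes (the mvme147 hunk above and the mvme16x hunk continuing below) follow the m68k hwclk() convention: op == 0 reads the hardware clock into *t, while a non-zero op asks to set it, which these boards do not implement and now report honestly. A minimal sketch of the resulting shape, with a hypothetical board name, assuming only this convention:

#include <linux/errno.h>
#include <linux/rtc.h>

static int exampleboard_hwclk(int op, struct rtc_time *t)
{
	if (!op) {
		/* read path: decode the RTC registers into *t */
		t->tm_year = 121;	/* years since 1900 */
		return 0;
	}

	/* FIXME: setting the time is not yet supported */
	return -EOPNOTSUPP;
}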
if (!op) { rtc->ctrl = RTC_READ; t->tm_year = bcd2int (rtc->bcd_year); @@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t) rtc->ctrl = 0; if (t->tm_year < 70) t->tm_year += 100; + } else { + /* FIXME Setting the time is not yet supported */ + return -EOPNOTSUPP; } return 0; } diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index d00313d1274e8..0561568f7b489 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -184,7 +184,7 @@ extern int npmem_ranges; #include #include -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) +#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET)) /* DEFINITION OF THE ZERO-PAGE (PAG0) */ /* based on work by Jason Eckhardt (jason@equator.com) */ diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index f03adb1999e77..367f6397bda7a 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr) } } +#ifdef CONFIG_PCI void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { if (!INDIRECT_ADDR(addr)) { iounmap(addr); } } +EXPORT_SYMBOL(pci_iounmap); +#endif EXPORT_SYMBOL(ioread8); EXPORT_SYMBOL(ioread16); @@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep); EXPORT_SYMBOL(iowrite32_rep); EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); -EXPORT_SYMBOL(pci_iounmap); diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 6900d0ac24212..089ee3ea55c8a 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -35,7 +35,6 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ - -include $(srctree)/include/linux/compiler_attributes.h \ $(LINUXINCLUDE) ifdef CONFIG_PPC64_BOOT_WRAPPER @@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y) BOOTCFLAGS += -fno-stack-protector endif +BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj) DTC_FLAGS ?= -p 1024 diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index a73f3f70a6576..de10a26972581 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32) @@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5, */ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED); + /* + * If system call is called with TM active, set _TIF_RESTOREALL to + * prevent RFSCV being used to return to userspace, because POWER9 + * TM implementation has problems with this instruction returning to + * transactional state. Final register values are not relevant because + * the transaction will be aborted upon return anyway. Or in the case + * of unsupported_scv SIGILL fault, the return state does not much + * matter because it's an edge case. + */ + if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && + unlikely(MSR_TM_TRANSACTIONAL(regs->msr))) + current_thread_info()->flags |= _TIF_RESTOREALL; + + /* + * If the system call was made with a transaction active, doom it and + * return without performing the system call. Unless it was an + * unsupported scv vector, in which case it's treated like an illegal + * instruction. 
+ */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) && + !trap_is_unsupported_scv(regs)) { + /* Enable TM in the kernel, and disable EE (for scv) */ + hard_irq_disable(); + mtmsr(mfmsr() | MSR_TM); + + /* tabort, this dooms the transaction, nothing else */ + asm volatile(".long 0x7c00071d | ((%0) << 16)" + :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)); + + /* + * Userspace will never see the return value. Execution will + * resume after the tbegin. of the aborted transaction with the + * checkpointed register state. A context switch could occur + * or signal delivered to the process before resuming the + * doomed transaction context, but that should all be handled + * as expected. + */ + return -ENOSYS; + } +#endif // CONFIG_PPC_TRANSACTIONAL_MEM + local_irq_enable(); if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) { diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index d4212d2ff0b54..ec950b08a8dcc 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -12,7 +12,6 @@ #include #include #include -#include .section ".toc","aw" SYS_CALL_TABLE: @@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE: .globl system_call_vectored_\name system_call_vectored_\name: _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -BEGIN_FTR_SECTION - extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ - bne tabort_syscall -END_FTR_SECTION_IFSET(CPU_FTR_TM) -#endif SCV_INTERRUPT_TO_KERNEL mr r10,r1 ld r1,PACAKSAVE(r13) @@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real) .globl system_call_common system_call_common: _ASM_NOKPROBE_SYMBOL(system_call_common) -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -BEGIN_FTR_SECTION - extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ - bne tabort_syscall -END_FTR_SECTION_IFSET(CPU_FTR_TM) -#endif mr r10,r1 ld r1,PACAKSAVE(r13) std r10,0(r1) @@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b) RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart) #endif -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM -tabort_syscall: -_ASM_NOKPROBE_SYMBOL(tabort_syscall) - /* Firstly we need to enable TM in the kernel */ - mfmsr r10 - li r9, 1 - rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG - mtmsrd r10, 0 - - /* tabort, this dooms the transaction, nothing else */ - li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) - TABORT(R9) - - /* - * Return directly to userspace. We have corrupted user register state, - * but userspace will never see that register state. Execution will - * resume after the tbegin of the aborted transaction with the - * checkpointed register state. - */ - li r9, MSR_RI - andc r10, r10, r9 - mtmsrd r10, 1 - mtspr SPRN_SRR0, r11 - mtspr SPRN_SRR1, r12 - RFI_TO_USER - b . /* prevent speculative execution */ -#endif - /* * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not * touched, no exit work created, then this can be used. diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 47a683cd00d24..fd829f7f25a47 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -249,6 +249,7 @@ void machine_check_queue_event(void) { int index; struct machine_check_event evt; + unsigned long msr; if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return; @@ -262,8 +263,20 @@ void machine_check_queue_event(void) memcpy(&local_paca->mce_info->mce_event_queue[index], &evt, sizeof(evt)); - /* Queue irq work to process this event later. 
*/ - irq_work_queue(&mce_event_process_work); + /* + * Queue irq work to process this event later. Before + * queuing the work enable translation for non radix LPAR, + * as irq_work_queue may try to access memory outside RMO + * region. + */ + if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) { + msr = mfmsr(); + mtmsr(msr | MSR_IR | MSR_DR); + irq_work_queue(&mce_event_process_work); + mtmsr(msr); + } else { + irq_work_queue(&mce_event_process_work); + } } void mce_common_process_ue(struct pt_regs *regs, diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 75079397c2a56..90484425a1e6b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) /* The following code handles the fake_suspend = 1 case */ mflr r0 std r0, PPC_LR_STKOFF(r1) - stdu r1, -PPC_MIN_STKFRM(r1) + stdu r1, -TM_FRAME_SIZE(r1) /* Turn on TM. */ mfmsr r8 @@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) nop + /* + * It's possible that treclaim. may modify registers, if we have lost + * track of fake-suspend state in the guest due to it using rfscv. + * Save and restore registers in case this occurs. + */ + mfspr r3, SPRN_DSCR + mfspr r4, SPRN_XER + mfspr r5, SPRN_AMR + /* SPRN_TAR would need to be saved here if the kernel ever used it */ + mfcr r12 + SAVE_NVGPRS(r1) + SAVE_GPR(2, r1) + SAVE_GPR(3, r1) + SAVE_GPR(4, r1) + SAVE_GPR(5, r1) + stw r12, 8(r1) + std r1, HSTATE_HOST_R1(r13) + /* We have to treclaim here because that's the only way to do S->N */ li r3, TM_CAUSE_KVM_RESCHED TRECLAIM(R3) + GET_PACA(r13) + ld r1, HSTATE_HOST_R1(r13) + REST_GPR(2, r1) + REST_GPR(3, r1) + REST_GPR(4, r1) + REST_GPR(5, r1) + lwz r12, 8(r1) + REST_NVGPRS(r1) + mtspr SPRN_DSCR, r3 + mtspr SPRN_XER, r4 + mtspr SPRN_AMR, r5 + mtcr r12 + HMT_MEDIUM + /* * We were in fake suspend, so we are not going to save the * register state as the guest checkpointed state (since @@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) - addi r1, r1, PPC_MIN_STKFRM + addi r1, r1, TM_FRAME_SIZE ld r0, PPC_LR_STKOFF(r1) mtlr r0 blr diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 5c1a157a83b89..244a727c6ba49 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq, if (xics_ics->check(xics_ics, hwirq)) return -EINVAL; - /* No chip data for the XICS domain */ + /* Let the ICS be the chip data for the XICS domain. For ICS native */ irq_domain_set_info(domain, virq, hwirq, xics_ics->chip, - NULL, handle_fasteoi_irq, NULL, NULL); + xics_ics, handle_fasteoi_irq, NULL, NULL); return 0; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2bd90c51efd34..b86de61b8caa2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -685,16 +685,6 @@ config STACK_GUARD The minimum size for the stack guard should be 256 for 31 bit and 512 for 64 bit. -config WARN_DYNAMIC_STACK - def_bool n - prompt "Emit compiler warnings for function with dynamic stack usage" - help - This option enables the compiler option -mwarn-dynamicstack. If the - compiler supports this options generates warnings for functions - that dynamically allocate stack space using alloca. - - Say N if you are unsure. 
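Returning to the xics-common.c hunk above: keeping the ICS pointer in the irq chip data means a native ICS flow handler can recover its ICS straight from the irq_data it is handed, instead of performing a global lookup. A minimal sketch of that consumption pattern, with a hypothetical struct and handler (irq_data_get_irq_chip_data() and irqd_to_hwirq() are the real helpers):

#include <linux/irq.h>

struct example_ics {
	void (*eoi)(struct example_ics *ics, unsigned long hwirq);
};

static void example_ics_eoi(struct irq_data *d)
{
	/* the ICS was stored as chip data via irq_domain_set_info() */
	struct example_ics *ics = irq_data_get_irq_chip_data(d);

	ics->eoi(ics, irqd_to_hwirq(d));
}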
- endmenu menu "I/O subsystem" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index a3cf33ad009fc..450b351dfa8ef 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) endif endif -ifdef CONFIG_WARN_DYNAMIC_STACK - ifneq ($(call cc-option,-mwarn-dynamicstack),) - KBUILD_CFLAGS += -mwarn-dynamicstack - KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack - endif -endif - ifdef CONFIG_EXPOLINE ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),) CC_FLAGS_EXPOLINE := -mindirect-branch=thunk diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 37b6115ed80e8..6aad18ee131d6 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_BPF_LSM=y CONFIG_PREEMPT=y +CONFIG_SCHED_CORE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -503,6 +504,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MICROSOFT is not set +# CONFIG_NET_VENDOR_LITEX is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m @@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_CIFS=m -CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y @@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m CONFIG_CRYPTO_BLAKE2S=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_SHA3=m @@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 CONFIG_DMA_API_DEBUG=y -CONFIG_STRING_SELFTEST=y CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y @@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_LKDTM=m CONFIG_TEST_MIN_HEAP=y -CONFIG_TEST_SORT=y CONFIG_KPROBES_SANITY_TEST=y CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y +CONFIG_STRING_SELFTEST=y CONFIG_TEST_BITOPS=m CONFIG_TEST_BPF=m CONFIG_TEST_LIVEPATCH=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 56a1cc85c5d72..f08b161c94463 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y CONFIG_BPF_JIT=y CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_BPF_LSM=y +CONFIG_SCHED_CORE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -494,6 +495,7 @@ CONFIG_NLMON=m # CONFIG_NET_VENDOR_HUAWEI is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MICROSOFT is not set +# CONFIG_NET_VENDOR_LITEX is not set # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_MLX4_EN=m CONFIG_MLX5_CORE=m @@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_V4_SECURITY_LABEL=y CONFIG_CIFS=m -CONFIG_CIFS_WEAK_PW_HASH=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y @@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_CRC32=m CONFIG_CRYPTO_BLAKE2S=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_SHA3=m diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 88419263a89a9..840d8594437d5 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 
b1) #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ - /* Branch instruction needs 6 bytes */ \ - int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\ + int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ @@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9080000, dst_reg, src_reg); break; case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */ - if (!imm) - break; - /* alfi %dst,imm */ - EMIT6_IMM(0xc20b0000, dst_reg, imm); + if (imm != 0) { + /* alfi %dst,imm */ + EMIT6_IMM(0xc20b0000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */ @@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9090000, dst_reg, src_reg); break; case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */ - if (!imm) - break; - /* alfi %dst,-imm */ - EMIT6_IMM(0xc20b0000, dst_reg, -imm); + if (imm != 0) { + /* alfi %dst,-imm */ + EMIT6_IMM(0xc20b0000, dst_reg, -imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ if (!imm) break; - /* agfi %dst,-imm */ - EMIT6_IMM(0xc2080000, dst_reg, -imm); + if (imm == -0x80000000) { + /* algfi %dst,0x80000000 */ + EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); + } else { + /* agfi %dst,-imm */ + EMIT6_IMM(0xc2080000, dst_reg, -imm); + } break; /* * BPF_MUL @@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb90c0000, dst_reg, src_reg); break; case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */ - if (imm == 1) - break; - /* msfi %r5,imm */ - EMIT6_IMM(0xc2010000, dst_reg, imm); + if (imm != 1) { + /* msfi %r5,imm */ + EMIT6_IMM(0xc2010000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */ @@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, if (BPF_OP(insn->code) == BPF_MOD) /* lhgi %dst,0 */ EMIT4_IMM(0xa7090000, dst_reg, 0); + else + EMIT_ZERO(dst_reg); break; } /* lhi %w0,0 */ @@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT4(0xb9820000, dst_reg, src_reg); break; case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */ - if (!imm) - break; - /* xilf %dst,imm */ - EMIT6_IMM(0xc0070000, dst_reg, imm); + if (imm != 0) { + /* xilf %dst,imm */ + EMIT6_IMM(0xc0070000, dst_reg, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ @@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */ - if (imm == 0) - break; - /* sll %dst,imm(%r0) */ - EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); + if (imm != 0) { + /* sll %dst,imm(%r0) */ + EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */ @@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */ - if (imm == 0) - break; - /* srl %dst,imm(%r0) */ - EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); + if (imm != 0) { + 
/* srl %dst,imm(%r0) */ + EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */ @@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */ - if (imm == 0) - break; - /* sra %dst,imm(%r0) */ - EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm); + if (imm != 0) { + /* sra %dst,imm(%r0) */ + EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm); + } EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */ diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index ae683aa623ace..c5b35ea129cfa 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, mmap_read_lock(current->mm); ret = -EINVAL; - vma = find_vma(current->mm, mmio_addr); + vma = vma_lookup(current->mm, mmio_addr); if (!vma) goto out_unlock_mmap; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) @@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, mmap_read_lock(current->mm); ret = -EINVAL; - vma = find_vma(current->mm, mmio_addr); + vma = vma_lookup(current->mm, mmio_addr); if (!vma) goto out_unlock_mmap; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile index 58592dfa5cb60..c081e7e2d6e7f 100644 --- a/arch/sh/boot/Makefile +++ b/arch/sh/boot/Makefile @@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE $(call if_changed,lzo) -$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE $(call if_changed,uimage,bzip2) -$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(call if_changed,uimage,gzip) -$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE $(call if_changed,uimage,lzma) -$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz +$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE $(call if_changed,uimage,xz) -$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE $(call if_changed,uimage,lzo) -$(obj)/uImage.bin: $(obj)/vmlinux.bin +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE $(call if_changed,uimage,none) OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec -$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux +$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) OBJCOPYFLAGS_uImage.srec := -I binary -O srec -$(obj)/uImage.srec: $(obj)/uImage +$(obj)/uImage.srec: $(obj)/uImage FORCE $(call if_changed,objcopy) $(obj)/uImage: $(obj)/uImage.$(suffix-y) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 8e1d72a167594..7ceae24b0ca99 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -356,7 +356,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size))) + size = PAGE_ALIGN(size); + + if (!sparc_dma_free_resource(cpu_addr, size)) return; dma_make_coherent(dma_addr, size); diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 8e645ddac58e2..30f171b7b00c2 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -39,6 
+39,7 @@ struct mdesc_hdr { u32 node_sz; /* node block size */ u32 name_sz; /* name block size */ u32 data_sz; /* data block size */ + char data[]; } __attribute__((aligned(16))); struct mdesc_elem { @@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info); static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) { - return (struct mdesc_elem *) (mdesc + 1); + return (struct mdesc_elem *) mdesc->data; } static void *name_block(struct mdesc_hdr *mdesc) diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c index c9da9f139694d..f3a8cd491ce0d 100644 --- a/arch/sparc/lib/iomap.c +++ b/arch/sparc/lib/iomap.c @@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr) EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); +#ifdef CONFIG_PCI void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { /* nothing to do */ } EXPORT_SYMBOL(pci_iounmap); +#endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4e001bbbb4256..dad7f85dcdea0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK config ARCH_HIBERNATION_POSSIBLE def_bool y +config ARCH_NR_GPIO + int + default 1024 if X86_64 + default 512 + config ARCH_SUSPEND_POSSIBLE def_bool y diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu index e7355f8b51c2b..94834c4b5e5ef 100644 --- a/arch/x86/Makefile_32.cpu +++ b/arch/x86/Makefile_32.cpu @@ -4,6 +4,12 @@ tune = $(call cc-option,-mtune=$(1),$(2)) +ifdef CONFIG_CC_IS_CLANG +align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0) +else +align := -falign-functions=0 -falign-jumps=0 -falign-loops=0 +endif + cflags-$(CONFIG_M486SX) += -march=i486 cflags-$(CONFIG_M486) += -march=i486 cflags-$(CONFIG_M586) += -march=i586 @@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6 # They make zero difference whatsosever to performance at this time. cflags-$(CONFIG_MK7) += -march=athlon cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon) -cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0 -cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0 +cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align) +cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align) cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586) -cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0 +cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align) cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) cflags-$(CONFIG_MVIAC7) += -march=i686 cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 90e682a92820d..32a1ad356c183 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val) /* * IPI implementation on Hyper-V. 
*/ -static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) +static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector, + bool exclude_self) { struct hv_send_ipi_ex **arg; struct hv_send_ipi_ex *ipi_arg; @@ -123,7 +124,10 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) if (!cpumask_equal(mask, cpu_present_mask)) { ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; - nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); + if (exclude_self) + nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask); + else + nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); } if (nr_bank < 0) goto ipi_mask_ex_done; @@ -138,15 +142,25 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) return hv_result_success(status); } -static bool __send_ipi_mask(const struct cpumask *mask, int vector) +static bool __send_ipi_mask(const struct cpumask *mask, int vector, + bool exclude_self) { - int cur_cpu, vcpu; + int cur_cpu, vcpu, this_cpu = smp_processor_id(); struct hv_send_ipi ipi_arg; u64 status; + unsigned int weight; trace_hyperv_send_ipi_mask(mask, vector); - if (cpumask_empty(mask)) + weight = cpumask_weight(mask); + + /* + * Do nothing if + * 1. the mask is empty + * 2. the mask only contains self when exclude_self is true + */ + if (weight == 0 || + (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask))) return true; if (!hv_hypercall_pg) @@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) ipi_arg.cpu_mask = 0; for_each_cpu(cur_cpu, mask) { + if (exclude_self && cur_cpu == this_cpu) + continue; vcpu = hv_cpu_number_to_vp_number(cur_cpu); if (vcpu == VP_INVAL) return false; @@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector) return hv_result_success(status); do_ex_hypercall: - return __send_ipi_mask_ex(mask, vector); + return __send_ipi_mask_ex(mask, vector, exclude_self); } static bool __send_ipi_one(int cpu, int vector) @@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector) return false; if (vp >= 64) - return __send_ipi_mask_ex(cpumask_of(cpu), vector); + return __send_ipi_mask_ex(cpumask_of(cpu), vector, false); status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp)); return hv_result_success(status); @@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector) static void hv_send_ipi_mask(const struct cpumask *mask, int vector) { - if (!__send_ipi_mask(mask, vector)) + if (!__send_ipi_mask(mask, vector, false)) orig_apic.send_IPI_mask(mask, vector); } static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) { - unsigned int this_cpu = smp_processor_id(); - struct cpumask new_mask; - const struct cpumask *local_mask; - - cpumask_copy(&new_mask, mask); - cpumask_clear_cpu(this_cpu, &new_mask); - local_mask = &new_mask; - if (!__send_ipi_mask(local_mask, vector)) + if (!__send_ipi_mask(mask, vector, true)) orig_apic.send_IPI_mask_allbutself(mask, vector); } @@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector) static void hv_send_ipi_all(int vector) { - if (!__send_ipi_mask(cpu_online_mask, vector)) + if (!__send_ipi_mask(cpu_online_mask, vector, false)) orig_apic.send_IPI_all(vector); } diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 8cb7816d03b4c..193204aee8801 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin 
static void kill_me_now(struct callback_head *ch) { + struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me); + + p->mce_count = 0; force_sig(SIGBUS); } @@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb) int flags = MF_ACTION_REQUIRED; int ret; + p->mce_count = 0; pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); if (!p->mce_ripv) @@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb) } } -static void queue_task_work(struct mce *m, int kill_current_task) +static void queue_task_work(struct mce *m, char *msg, int kill_current_task) { - current->mce_addr = m->addr; - current->mce_kflags = m->kflags; - current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); - current->mce_whole_page = whole_page(m); + int count = ++current->mce_count; - if (kill_current_task) - current->mce_kill_me.func = kill_me_now; - else - current->mce_kill_me.func = kill_me_maybe; + /* First call, save all the details */ + if (count == 1) { + current->mce_addr = m->addr; + current->mce_kflags = m->kflags; + current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV); + current->mce_whole_page = whole_page(m); + + if (kill_current_task) + current->mce_kill_me.func = kill_me_now; + else + current->mce_kill_me.func = kill_me_maybe; + } + + /* Ten is likely overkill. Don't expect more than two faults before task_work() */ + if (count > 10) + mce_panic("Too many consecutive machine checks while accessing user data", m, msg); + + /* Second or later call, make sure page address matches the one from first call */ + if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT)) + mce_panic("Consecutive machine checks to different user pages", m, msg); + + /* Do not call task_work_add() more than once */ + if (count > 1) + return; task_work_add(current, ¤t->mce_kill_me, TWA_RESUME); } @@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs) /* If this triggers there is no way to recover. Die hard. 
*/ BUG_ON(!on_thread_stack() || !user_mode(regs)); - queue_task_work(&m, kill_current_task); + queue_task_work(&m, msg, kill_current_task); } else { /* @@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs) } if (m.kflags & MCE_IN_KERNEL_COPYIN) - queue_task_work(&m, kill_current_task); + queue_task_work(&m, msg, kill_current_task); } out: mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 78a32b956e810..5afd985591939 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) static void __init pcpu_fc_free(void *ptr, size_t size) { - memblock_free(__pa(ptr), size); + memblock_free_ptr(ptr, size); } static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a6e11763763fb..36098226a9573 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr) return 0; p4d = p4d_offset(pgd, addr); - if (p4d_none(*p4d)) + if (!p4d_present(*p4d)) return 0; pud = pud_offset(p4d, addr); - if (pud_none(*pud)) + if (!pud_present(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + if (!pmd_present(*pmd)) return 0; if (pmd_large(*pmd)) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 1a50434c8a4da..ef885370719a6 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, p = early_alloc(PMD_SIZE, nid, false); if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL)) return; - else if (p) - memblock_free(__pa(p), PMD_SIZE); + memblock_free_ptr(p, PMD_SIZE); } p = early_alloc(PAGE_SIZE, nid, true); @@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr, p = early_alloc(PUD_SIZE, nid, false); if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL)) return; - else if (p) - memblock_free(__pa(p), PUD_SIZE); + memblock_free_ptr(p, PUD_SIZE); } p = early_alloc(PAGE_SIZE, nid, true); diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index a1b5c71099e61..1e9b93b088dbf 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -355,7 +355,7 @@ void __init numa_reset_distance(void) /* numa_distance could be 1LU marking allocation failure, test cnt */ if (numa_distance_cnt) - memblock_free(__pa(numa_distance), size); + memblock_free_ptr(numa_distance, size); numa_distance_cnt = 0; numa_distance = NULL; /* enable table creation */ } diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index 737491b13728c..e801e30089c43 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) } /* free the copied physical distance table */ - if (phys_dist) - memblock_free(__pa(phys_dist), phys_size); + memblock_free_ptr(phys_dist, phys_size); return; no_emu: diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 3112ca7786ed1..4ba2a3ee4bce1 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type, int err = 0; start = sanitize_phys(start); - end = sanitize_phys(end); + + /* + * The end address passed into this 
function is exclusive, but + * sanitize_phys() expects an inclusive address. + */ + end = sanitize_phys(end - 1) + 1; if (start >= end) { WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, start, end - 1, cattr_name(req_type)); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 753f63734c134..349f780a15674 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void) x86_platform.legacy.rtc = 1; } +static void __init xen_domu_set_legacy_features(void) +{ + x86_platform.legacy.rtc = 0; +} + /* First C function to be called on Xen boot */ asmlinkage __visible void __init xen_start_kernel(void) { @@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void) add_preferred_console("xenboot", 0, NULL); if (pci_xen) x86_init.pci.arch_init = pci_xen_init; + x86_platform.set_legacy_features = + xen_domu_set_legacy_features; } else { const struct dom0_vga_console_info *info = (void *)((char *)xen_start_info + diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 1df5f01529e59..8d751939c6f30 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, if (pinned) { struct page *page = pfn_to_page(pfn); - if (static_branch_likely(&xen_struct_pages_ready)) + pinned = false; + if (static_branch_likely(&xen_struct_pages_ready)) { + pinned = PagePinned(page); SetPagePinned(page); + } xen_mc_batch(); __set_pfn_prot(pfn, PAGE_KERNEL_RO); - if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned) __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); xen_mc_issue(PARAVIRT_LAZY_MMU); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 3c88a79a319bb..38b9f7684952a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); - ret = blk_iolatency_init(q); - if (ret) - goto err_destroy_all; - ret = blk_ioprio_init(q); if (ret) goto err_destroy_all; @@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q) if (ret) goto err_destroy_all; + ret = blk_iolatency_init(q); + if (ret) { + blk_throtl_exit(q); + goto err_destroy_all; + } + return 0; err_destroy_all: @@ -1364,10 +1366,14 @@ int blkcg_activate_policy(struct request_queue *q, /* alloc failed, nothing's initialized yet, free everything */ spin_lock_irq(&q->queue_lock); list_for_each_entry(blkg, &q->blkg_list, q_node) { + struct blkcg *blkcg = blkg->blkcg; + + spin_lock(&blkcg->lock); if (blkg->pd[pol->plid]) { pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } + spin_unlock(&blkcg->lock); } spin_unlock_irq(&q->queue_lock); ret = -ENOMEM; @@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q, __clear_bit(pol->plid, q->blkcg_pols); list_for_each_entry(blkg, &q->blkg_list, q_node) { + struct blkcg *blkcg = blkg->blkcg; + + spin_lock(&blkcg->lock); if (blkg->pd[pol->plid]) { if (pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } + spin_unlock(&blkcg->lock); } spin_unlock_irq(&q->queue_lock); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 69a12177dfb62..16d5d5338392a 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register); */ void 
blk_integrity_unregister(struct gendisk *disk) { + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return; + + /* ensure all bios are off the integrity workqueue */ + blk_flush_integrity(); blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue); - memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); + memset(bi, 0, sizeof(*bi)); } EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 86f87346232a6..ff5caeb825429 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags, spin_lock_irqsave(&tags->lock, flags); rq = tags->rqs[bitnr]; - if (!rq || !refcount_inc_not_zero(&rq->ref)) + if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref)) rq = NULL; spin_unlock_irqrestore(&tags->lock, flags); return rq; diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index 46c503486e962..00fb4120a5b3a 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -264,7 +264,7 @@ void __init numa_free_distance(void) size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - memblock_free(__pa(numa_distance), size); + memblock_free_ptr(numa_distance, size); numa_distance_cnt = 0; numa_distance = NULL; } diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index a97f33d0c59f9..94665037f4a35 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user) const char *file = *(const char **)(tracedata + 2); unsigned int user_hash_value, file_hash_value; + if (!x86_platform.legacy.rtc) + return; + user_hash_value = user % USERHASH; file_hash_value = hash_string(lineno, file, FILEHASH); set_magic_time(user_hash_value, file_hash_value, dev_hash_value); @@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = { static int __init early_resume_init(void) { + if (!x86_platform.legacy.rtc) + return 0; + hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; @@ -277,6 +284,9 @@ static int __init late_resume_init(void) unsigned int val = hash_value_early_read; unsigned int user, file, dev; + if (!x86_platform.legacy.rtc) + return 0; + user = val % USERHASH; val = val / USERHASH; file = val % FILEHASH; diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index 66b05a326910e..a6f365b9cc1ad 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l if (count) return count; - kobject_put(&attr_set->kobj); mutex_destroy(&attr_set->update_lock); + kobject_put(&attr_set->kobj); return 0; } EXPORT_SYMBOL_GPL(gov_attr_set_put); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 1097f826ad70e..8c176b7dae415 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return -ENODEV; - if (no_load) - return -ENODEV; - id = x86_match_cpu(hwp_support_ids); if (id) { + bool hwp_forced = intel_pstate_hwp_is_enabled(); + + if (hwp_forced) + pr_info("HWP enabled by BIOS\n"); + else if (no_load) + return -ENODEV; + copy_cpu_funcs(&core_funcs); /* 
* Avoid enabling HWP for processors without EPP support, @@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void) * If HWP is enabled already, though, there is no choice but to * deal with it. */ - if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || - intel_pstate_hwp_is_enabled()) { + if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; @@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void) goto hwp_cpu_matched; } + pr_info("HWP not enabled\n"); } else { + if (no_load) + return -ENODEV; + id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) { pr_info("CPU model not supported\n"); @@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str) else if (!strcmp(str, "passive")) default_driver = &intel_cpufreq; - if (!strcmp(str, "no_hwp")) { - pr_info("HWP disabled\n"); + if (!strcmp(str, "no_hwp")) no_hwp = 1; - } + if (!strcmp(str, "force")) force_load = 1; if (!strcmp(str, "hwp_only")) diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 284b6bd040b1a..d295f405c4bb0 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; - int cur_cluster = cpu_to_cluster(policy->cpu); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dc3c6b3a00e53..d356e329e6f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -758,7 +758,7 @@ enum amd_hw_ip_block_type { MAX_HWIP }; -#define HWIP_MAX_INSTANCE 8 +#define HWIP_MAX_INSTANCE 10 struct amd_powerplay { void *pp_handle; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 3003ee1c94873..1d41c2c00623b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm) kgd2kfd_suspend(adev->kfd.dev, run_pm); } +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->kfd.dev) + r = kgd2kfd_resume_iommu(adev->kfd.dev); + + return r; +} + int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) { int r = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index ec028cf963f5a..3bc52b2c604fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); @@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); +int kgd2kfd_resume_iommu(struct kfd_dev *kfd); int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); int kgd2kfd_pre_reset(struct kfd_dev *kfd); int kgd2kfd_post_reset(struct kfd_dev *kfd); @@ -365,6 +367,11 @@ static 
inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { } +static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd) +{ + return 0; +} + static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 277128846dd12..463b9c0283f7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) struct dentry *ent; int r, i; - - ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, &fops_sclk_set); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } /* Register debugfs entries for amdgpu_ttm */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 41c6b3aacd379..ab3794c42d363 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + goto init_failed; + r = amdgpu_device_ip_hw_init_phase1(adev); if (r) goto init_failed; @@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + return r; + r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; @@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, dev_warn(tmp_adev->dev, "asic atom init failed!"); } else { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_amdkfd_resume_iommu(tmp_adev); + if (r) + goto out; + r = amdgpu_device_ip_resume_phase1(tmp_adev); if (r) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index c7797eac83c3a..9ff600a38559d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) break; default: adev->gmc.tmz_enabled = false; - dev_warn(adev->dev, + dev_info(adev->dev, "Trusted Memory Zone (TMZ) feature not supported\n"); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index dc44c946a2442..98732518543e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -757,7 +757,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return res; } -inline uint32_t amdgpu_ras_eeprom_max_record_count(void) +uint32_t amdgpu_ras_eeprom_max_record_count(void) { return RAS_MAX_RECORD_COUNT; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index f95fc61b30219..6bb00578bfbbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, const u32 
num); -inline uint32_t amdgpu_ras_eeprom_max_record_count(void); +uint32_t amdgpu_ras_eeprom_max_record_count(void); void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 7b634a1517f9c..0554576d36955 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, ent = debugfs_create_file(name, S_IFREG | S_IRUGO, root, ring, &amdgpu_debugfs_ring_fops); - if (!ent) - return -ENOMEM; + if (IS_ERR(ent)) + return PTR_ERR(ent); i_size_write(ent->d_inode, ring->ring_size + 12); ring->ent = ent; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 38dade421d465..94126dc396888 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto out; } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + if (adev->mman.buffer_funcs_enabled) { if (((old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) || @@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. 
- */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - out: /* update statistics */ atomic64_add(bo->base.size, &adev->num_bytes_moved); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 16a57b70cc1a9..98d1b3ab3a464 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 4, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, + .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, @@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, + .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, @@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, if (!kfd) return NULL; - /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. - * 32 and 64-bit requests are possible and must be - * supported. 
- */ - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd); - if (device_info->needs_pci_atomics && - !kfd->pci_atomic_requested) { - dev_info(kfd_device, - "skipped device %x:%x, PCI rejects atomics\n", - pdev->vendor, pdev->device); - kfree(kfd); - return NULL; - } - kfd->kgd = kgd; kfd->device_info = device_info; kfd->pdev = pdev; @@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - kfd->vm_info.first_vmid_kfd + 1; + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. + * 32 and 64-bit requests are possible and must be + * supported. + */ + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd); + if (!kfd->pci_atomic_requested && + kfd->device_info->needs_pci_atomics && + (!kfd->device_info->no_atomic_fw_version || + kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { + dev_info(kfd_device, + "skipped device %x:%x, PCI rejects atomics %d<%d\n", + kfd->pdev->vendor, kfd->pdev->device, + kfd->mec_fw_version, + kfd->device_info->no_atomic_fw_version); + return false; + } + /* Verify module parameters regarding mapped process number*/ if ((hws_max_conc_proc < 0) || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { @@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } -static int kfd_resume(struct kfd_dev *kfd) +int kgd2kfd_resume_iommu(struct kfd_dev *kfd) { int err = 0; err = kfd_iommu_resume(kfd); - if (err) { + if (err) dev_err(kfd_device, "Failed to resume IOMMU for device %x:%x\n", kfd->pdev->vendor, kfd->pdev->device); - return err; - } + return err; +} + +static int kfd_resume(struct kfd_dev *kfd) +{ + int err = 0; err = kfd->dqm->ops.start(kfd->dqm); if (err) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ab83b0de6b22d..6d8f9bb2d9057 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -207,6 +207,7 @@ struct kfd_device_info { bool supports_cwsr; bool needs_iommu_device; bool needs_pci_atomics; + uint32_t no_atomic_fw_version; unsigned int num_sdma_engines; unsigned int num_xgmi_sdma_engines; unsigned int num_sdma_queues_per_engine; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9b1fc54555ee6..66c799f5c7cf7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ uint32_t agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; + memset(pa_config, 0, sizeof(*pa_config)); + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); @@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) return 0; #if defined(CONFIG_DRM_AMD_DC_DCN) - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; + if (dm->vblank_control_workqueue) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); - work->dm = dm; - work->acrtc = acrtc; - work->enable = enable; + INIT_WORK(&work->work, vblank_control_worker); + work->dm = dm; + work->acrtc = acrtc; + work->enable = enable; - if (acrtc_state->stream) { - dc_stream_retain(acrtc_state->stream); - work->stream = acrtc_state->stream; - } + if 
(acrtc_state->stream) { + dc_stream_retain(acrtc_state->stream); + work->stream = acrtc_state->stream; + } - queue_work(dm->vblank_control_workqueue, &work->work); + queue_work(dm->vblank_control_workqueue, &work->work); + } #endif return 0; @@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { #if defined(CONFIG_DRM_AMD_DC_DCN) static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { struct dc_stream_state *stream = NULL; struct drm_connector *connector; struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock, bpp; + int i, j, clock; int vcpi, pbn_div, pbn = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } pbn_div = dm_mst_get_pbn_divider(stream->link); - bpp = stream->timing.dsc_cfg.bits_per_pixel; clock = stream->timing.pix_clk_100hz / 10; - pbn = drm_dp_calc_pbn_mode(clock, bpp, true); + /* pbn is calculated by compute_mst_dsc_configs_for_state*/ + for (j = 0; j < dc_state->stream_count; j++) { + if (vars[j].aconnector == aconnector) { + pbn = vars[j].pbn; + break; + } + } + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, } } +static void amdgpu_set_panel_orientation(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + const struct drm_display_mode *native_mode; + + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && + connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + return; + + encoder = amdgpu_dm_connector_to_encoder(connector); + if (!encoder) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + native_mode = &amdgpu_encoder->native_mode; + if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) + return; + + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + native_mode->hdisplay, + native_mode->vdisplay); +} + static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, struct edid *edid) { @@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, * restored here. */ amdgpu_dm_update_freesync_caps(connector, edid); + + amdgpu_set_panel_orientation(connector); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state, state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; - /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled - * hot-plug, headless s3, dpms + /* Stream removed and re-enabled + * + * Can sometimes overlap with the HPD case, + * thus set update_hdcp to false to avoid + * setting HDCP multiple times. 
+ * + * Handles: DESIRED -> DESIRED (Special case) + */ + if (!(old_state->crtc && old_state->crtc->enabled) && + state->crtc && state->crtc->enabled && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + dm_con_state->update_hdcp = false; + return true; + } + + /* Hot-plug, headless s3, dpms + * + * Only start HDCP if the display is connected/enabled. + * update_hdcp flag will be set to false until the next + * HPD comes in. * * Handles: DESIRED -> DESIRED (Special case) */ @@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * If PSR or idle optimizations are enabled then flush out * any pending work before hardware programming. */ - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif bundle->stream_update.stream = acrtc_state->stream; @@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* if there mode set or reset, disable eDP PSR */ if (mode_set_reset_required) { #if defined(CONFIG_DRM_AMD_DC_DCN) - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif amdgpu_dm_psr_disable_all(dm); } @@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int ret, i; bool lock_and_validation_needed = false; struct dm_crtc_state *dm_old_crtc_state; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dsc_mst_fairness_vars vars[MAX_PIPES]; +#endif trace_amdgpu_dm_atomic_check_begin(state); @@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) goto fail; - ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); if (ret) goto fail; #endif @@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; status = dc_validate_global_state(dc, dm_state->context, false); if (status != DC_OK) { - DC_LOG_WARNING("DC global validation failure: %s (%d)", + drm_dbg_atomic(dev, + "DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1bcba6943fd7f..7af0d58c231b6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -518,12 +518,7 @@ struct dsc_mst_fairness_params { uint32_t num_slices_h; uint32_t num_slices_v; uint32_t bpp_overwrite; -}; - -struct dsc_mst_fairness_vars { - int pbn; - bool dsc_enabled; - int bpp_x16; + struct amdgpu_dm_connector *aconnector; }; static int kbps_to_peak_pbn(int kbps) @@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, - struct dc_link *dc_link) + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars) { int i; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; - struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; int count = 0; bool debugfs_overwrite = false; @@ -776,6 +771,7 @@ static bool 
compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].timing = &stream->timing; params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) @@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, } /* Try no compression */ for (i = 0; i < count; i++) { + vars[i].aconnector = params[i].aconnector; vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i].dsc_enabled = false; vars[i].bpp_x16 = 0; @@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, } bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; @@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, return false; mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index b38bd68121ceb..900d3f7a84989 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -39,8 +39,17 @@ void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); #if defined(CONFIG_DRM_AMD_DC_DCN) + +struct dsc_mst_fairness_vars { + int pbn; + bool dsc_enabled; + int bpp_x16; + struct amdgpu_dm_connector *aconnector; +}; + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state); + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); #endif #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index c9f47d1674723..b1bf80da3a55f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void) depth = *pcpu; put_cpu_ptr(&fpu_recursion_depth); - ASSERT(depth > 1); + ASSERT(depth >= 1); } /** diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8bd7f42a8053f..1e44b13c1c7de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2586,13 +2586,22 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link) int dc_link_get_backlight_level(const struct dc_link *link) { struct abm *abm = get_abm_from_stream_res(link); + struct panel_cntl *panel_cntl = link->panel_cntl; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + bool fw_set_brightness = true; - if (abm == NULL || abm->funcs->get_current_backlight == NULL) - return DC_ERROR_UNEXPECTED; + if (dmcu) + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); - return (int) abm->funcs->get_current_backlight(abm); + if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) + return panel_cntl->funcs->get_current_backlight(panel_cntl); + else if (abm != 
NULL && abm->funcs->get_current_backlight != NULL) + return (int) abm->funcs->get_current_backlight(abm); + else + return DC_ERROR_UNEXPECTED; } int dc_link_get_target_backlight_pwm(const struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 330edd666b7d8..f6dbc5a747576 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1,4 +1,26 @@ -/* Copyright 2015 Advanced Micro Devices, Inc. */ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ #include "dm_services.h" #include "dc.h" #include "dc_link_dp.h" @@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries( dp_disable_link_phy(link, signal); /* Abort link training if failure due to sink being unplugged. */ - if (status == LINK_TRAINING_ABORT) - break; - else if (do_fallback) { + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + dc_link_detect_sink(link, &type); + if (type == dc_connection_none) + break; + } else if (do_fallback) { decide_fallback_link_setting(*link_setting, ¤t_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements. 
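
A note on the perform_link_training_with_retries() hunk above: LINK_TRAINING_ABORT is no longer treated as final. The driver re-runs sink detection and gives up only when the sink is actually unplugged; a transient abort falls through to the same retry/fallback path as any other failure. Below is a minimal standalone sketch of that control flow; the types and helper names (do_link_training, detect_sink, and friends) are invented stand-ins for illustration, not the real DC API.

#include <stdbool.h>

enum training_status { TRAINING_OK, TRAINING_ABORT, TRAINING_FAIL };
enum connection_type { CONNECTION_NONE, CONNECTION_ACTIVE };

/* Hypothetical stand-ins for the hardware queries in this sketch. */
static enum training_status do_link_training(int link_rate)
{
	return link_rate > 1 ? TRAINING_FAIL : TRAINING_OK;
}

static enum connection_type detect_sink(void)
{
	return CONNECTION_ACTIVE;
}

static bool train_with_retries(int link_rate, int max_attempts)
{
	for (int attempt = 0; attempt < max_attempts; attempt++) {
		enum training_status status = do_link_training(link_rate);

		if (status == TRAINING_OK)
			return true;

		if (status == TRAINING_ABORT) {
			/* An abort is fatal only when the sink is really
			 * gone; a transient abort mid-training is retried
			 * like any other failure. */
			if (detect_sink() == CONNECTION_NONE)
				return false;
		} else if (link_rate > 1) {
			/* Fall back to a reduced link setting and retry. */
			link_rate--;
		}
	}
	return false;
}

The point of the change is visible in the sketch: breaking out of the loop is tied to the physical state of the connector, not to the status code alone.
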
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e923392358631..e8570060d007b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -49,7 +49,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; - uint32_t round_result; uint32_t bl_period, bl_int_count; uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; @@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c current_backlight = div_u64(current_backlight, bl_period); current_backlight = (current_backlight + 1) >> 1; - current_backlight = (uint64_t)(current_backlight) * bl_period; - - round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); - - round_result = (round_result >> (bl_int_count-1)) & 1; - - current_backlight >>= bl_int_count; - current_backlight += round_result; - return (uint32_t)(current_backlight); } diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h index 8a08ecc34c699..4884a4e1f261c 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h @@ -33,63 +33,47 @@ #define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging #define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible #define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible -#define TABLE_COUNT 6 +#define TABLE_SMU_METRICS 6 // Called by Driver +#define TABLE_COUNT 7 -#define NUM_DSPCLK_LEVELS 8 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_MEMCLK_DPM_LEVELS 4 +typedef struct SmuMetricsTable_t { + //CPU status + uint16_t CoreFrequency[6]; //[MHz] + uint32_t CorePower[6]; //[mW] + uint16_t CoreTemperature[6]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + uint16_t C0Residency[6]; //Percentage -#define NUMBER_OF_PSTATES 8 -#define NUMBER_OF_CORES 8 + // GFX status + uint16_t GfxclkFrequency; //[MHz] + uint16_t GfxTemperature; //[centi-Celsius] -typedef enum { - S3_TYPE_ENTRY, - S5_TYPE_ENTRY, -} Sleep_Type_e; + // SOC IP info + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] -typedef enum { - GFX_OFF = 0, - GFX_ON = 1, -} GFX_Mode_e; + // power, VF info for CPU/GFX telemetry rails, and then socket power total + uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX + uint32_t CurrentSocketPower; //[mW] -typedef enum { - CPU_P0 = 0, - CPU_P1, - CPU_P2, - CPU_P3, - CPU_P4, - CPU_P5, - CPU_P6, - CPU_P7 -} CPU_PState_e; + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; + uint16_t Spare; -typedef enum { - CPU_CORE0 = 0, - CPU_CORE1, - CPU_CORE2, - CPU_CORE3, - CPU_CORE4, - CPU_CORE5, - CPU_CORE6, - CPU_CORE7 -} CORE_ID_e; +} SmuMetricsTable_t; -typedef enum { - DF_DPM0 = 0, - DF_DPM1, - DF_DPM2, - DF_DPM3, - DF_PState_Count -} DF_PState_e; - -typedef enum { - GFX_DPM0 = 0, - GFX_DPM1, - GFX_DPM2, - GFX_DPM3, - GFX_PState_Count -} GFX_PState_e; +typedef struct 
SmuMetrics_t { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + uint32_t SampleStartTime; + uint32_t SampleStopTime; + uint32_t Accnt; +} SmuMetrics_t; #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index 6f1b1b50d527c..18b862a90fbe5 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -226,7 +226,10 @@ __SMU_DUMMY_MAP(SetUclkDpmMode), \ __SMU_DUMMY_MAP(LightSBR), \ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ - __SMU_DUMMY_MAP(BoardPowerCalibration), + __SMU_DUMMY_MAP(BoardPowerCalibration), \ + __SMU_DUMMY_MAP(RequestGfxclk), \ + __SMU_DUMMY_MAP(ForceGfxVid), \ + __SMU_DUMMY_MAP(UnforceGfxVid), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h index 6e6088760b180..909a86aa60f32 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h @@ -65,6 +65,13 @@ #define PPSMC_MSG_SetDriverTableVMID 0x34 #define PPSMC_MSG_SetSoftMinCclk 0x35 #define PPSMC_MSG_SetSoftMaxCclk 0x36 -#define PPSMC_Message_Count 0x37 +#define PPSMC_MSG_GetGfxFrequency 0x37 +#define PPSMC_MSG_GetGfxVid 0x38 +#define PPSMC_MSG_ForceGfxFreq 0x39 +#define PPSMC_MSG_UnForceGfxFreq 0x3A +#define PPSMC_MSG_ForceGfxVid 0x3B +#define PPSMC_MSG_UnforceGfxVid 0x3C +#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D +#define PPSMC_Message_Count 0x3E #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 3ab1ce4d3419b..04863a7971155 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1404,7 +1404,7 @@ static int smu_disable_dpms(struct smu_context *smu) */ if (smu->uploading_custom_pp_table && (adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) + (adev->asic_type <= CHIP_BEIGE_GOBY)) return smu_disable_all_features_with_exception(smu, true, SMU_FEATURE_COUNT); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index e343cc218990b..082f01893f3d1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = NULL; uint32_t gen_speed, lane_width; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index b05f9541accc3..3d4c65bc29dc3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -44,6 +44,27 @@ #undef pr_info #undef pr_debug +/* unit: MHz */ +#define CYAN_SKILLFISH_SCLK_MIN 1000 +#define CYAN_SKILLFISH_SCLK_MAX 2000 +#define CYAN_SKILLFISH_SCLK_DEFAULT 1800 + +/* unit: mV */ +#define CYAN_SKILLFISH_VDDC_MIN 700 +#define CYAN_SKILLFISH_VDDC_MAX 1129 +#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe + +static struct gfx_user_settings { + uint32_t sclk; + uint32_t vddc; +} cyan_skillfish_user_settings; + +#define FEATURE_MASK(feature) (1ULL << feature) +#define 
SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ + FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ + FEATURE_MASK(FEATURE_GFX_DPM_BIT)) + static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), @@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0), MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), + MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0), + MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0), + MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0), +}; + +static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = { + TAB_MAP_VALID(SMU_METRICS), }; +static int cyan_skillfish_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, + sizeof(SmuMetrics_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->metrics_time = 0; + + return 0; + +err1_out: + smu_table->gpu_metrics_table_size = 0; + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int cyan_skillfish_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = cyan_skillfish_tables_init(smu); + if (ret) + return ret; + + return smu_v11_0_init_smc_tables(smu); +} + +static int cyan_skillfish_finit_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + smu_table->gpu_metrics_table_size = 0; + + smu_table->metrics_time = 0; + + return 0; +} + +static int +cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->Current.GfxclkFrequency; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->Current.SocclkFrequency; + break; + case METRICS_CURR_VCLK: + *value = metrics->Current.VclkFrequency; + break; + case METRICS_CURR_DCLK: + *value = metrics->Current.DclkFrequency; + break; + case METRICS_CURR_UCLK: + *value = metrics->Current.MemclkFrequency; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Current.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->Current.GfxTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->Current.SocTemperature / 100 * + 
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_VOLTAGE_VDDSOC: + *value = metrics->Current.Voltage[0]; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->Current.Voltage[1]; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->Current.ThrottlerStatus; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int cyan_skillfish_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + + switch (sensor) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDNB: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDSOC, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&smu->sensor_lock); + + return ret; +} + +static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + member_type = METRICS_CURR_GFXCLK; + break; + case SMU_FCLK: + case SMU_MCLK: + member_type = METRICS_CURR_UCLK; + break; + case SMU_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case SMU_VCLK: + member_type = METRICS_CURR_VCLK; + break; + case SMU_DCLK: + member_type = METRICS_CURR_DCLK; + break; + default: + return -EINVAL; + } + + return cyan_skillfish_get_smu_metrics_data(smu, member_type, value); +} + +static int cyan_skillfish_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + int ret = 0, size = 0; + uint32_t cur_value = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + switch (clk_type) { + case SMU_OD_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + case SMU_OD_VDDC_CURVE: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC"); + size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value); + break; + case SMU_OD_RANGE: + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", + CYAN_SKILLFISH_VDDC_MIN, 
CYAN_SKILLFISH_VDDC_MAX); + break; + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_FCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_VCLK: + case SMU_DCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + default: + dev_warn(smu->adev->dev, "Unsupported clock type\n"); + return ret; + } + + return size; +} + +static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t feature_mask[2]; + uint64_t feature_enabled; + + /* we need to re-init after suspend so return false */ + if (adev->in_suspend) + return false; + + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); + + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32); + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_2 *gpu_metrics = + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int i, ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_soc_power = metrics.Current.Power[0]; + gpu_metrics->average_gfx_power = metrics.Current.Power[1]; + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + for (i = 0; i < 6; i++) { + gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i]; + gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i]; + gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i]; + } + + for (i = 0; i < 2; i++) { + gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i]; + gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i]; + } + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_2); +} + +static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], uint32_t size) +{ + int ret = 0; + uint32_t vid; + + switch (type) { + case PP_OD_EDIT_VDDC_CURVE: + if (size != 3 || input[0] != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (input[1] <= CYAN_SKILLFISH_SCLK_MIN || + input[1] > 
CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if (input[2] <= CYAN_SKILLFISH_VDDC_MIN || + input[2] > CYAN_SKILLFISH_VDDC_MAX) { + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = input[1]; + cyan_skillfish_user_settings.vddc = input[2]; + + break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT; + cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; + + break; + case PP_OD_COMMIT_DPM_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN || + cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) && + (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN || + cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) { + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk, + cyan_skillfish_user_settings.sclk, NULL); + if (ret) { + dev_err(smu->adev->dev, "Set sclk failed!\n"); + return ret; + } + + if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Unforce vddc failed!\n"); + return ret; + } + } else { + /* + * PMFW accepts SVI2 VID code, convert voltage to VID: + * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001) + */ + vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Force vddc failed!\n"); + return ret; + } + } + + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .check_fw_status = smu_v11_0_check_fw_status, .check_fw_version = smu_v11_0_check_fw_version, .init_power = smu_v11_0_init_power, .fini_power = smu_v11_0_fini_power, + .init_smc_tables = cyan_skillfish_init_smc_tables, + .fini_smc_tables = cyan_skillfish_finit_smc_tables, + .read_sensor = cyan_skillfish_read_sensor, + .print_clk_levels = cyan_skillfish_print_clk_levels, + .is_dpm_running = cyan_skillfish_is_dpm_running, + .get_gpu_metrics = cyan_skillfish_get_gpu_metrics, + .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table, .register_irq_handler = smu_v11_0_register_irq_handler, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, @@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &cyan_skillfish_ppt_funcs; smu->message_map = cyan_skillfish_message_map; + smu->table_map = cyan_skillfish_table_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 
a5fc5d7cb6c79..b1ad451af06bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu, struct smu_11_0_overdrive_table *od_settings = smu->od_settings; uint32_t min_value, max_value; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_OD_RANGE: if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) { navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, @@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + /* + * This aims the case below: + * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded + * + * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To + * make that possible, PMFW needs to acknowledge the dstate transition + * process for both gfx(function 0) and audio(function 1) function of + * the ASIC. + * + * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the + * device representing the audio function of the ASIC. And that means + * even if the sound driver(snd_hda_intel) was not loaded yet, it's still + * possible runpm suspend kicked on the ASIC. However without the dstate + * transition notification from audio function, pmfw cannot handle the + * BACO in/exit correctly. And that will cause driver hang on runpm + * resuming. + * + * To address this, we revert to legacy message way(driver masters the + * timing for BACO in/exit) on sound driver missing. 
+ */ + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 5e292c3f5050e..ca57221e39629 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; uint32_t smu_version; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) { sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN, @@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 3a3421452e57a..f6ef0ce6e9e2c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? 
smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", @@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5aa175e12a783..145f13b8c977d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ab652028e0034..5019903db492a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, uint32_t freq_values[3] = {0}; uint32_t min_clk, max_clk; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; switch (type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "GFXCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); fallthrough; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -795,7 +799,7 @@ static int 
aldebaran_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size = sysfs_emit(buf, "%s:\n", "MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); fallthrough; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 627ba2eec7fda..a403657151ba1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 66711ab24c15a..843d2cbfc71d4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + bool snd_driver_loaded; + + /* + * If the ASIC comes with no audio function, we always assume + * it is "enabled". + */ + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (!p) + return true; + + snd_driver_loaded = pci_is_enabled(p) ? true : false; + + pci_dev_put(p); + + return snd_driver_loaded; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 16993daa2ae04..beea03810bcaa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. 
+ */ +static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); + #endif #endif diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 76d38561c9103..cf741c5c82d25 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, if (switch_mmu_context) { struct etnaviv_iommu_context *old_context = gpu->mmu_context; - etnaviv_iommu_context_get(mmu_context); - gpu->mmu_context = mmu_context; + gpu->mmu_context = etnaviv_iommu_context_get(mmu_context); etnaviv_iommu_context_put(old_context); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8f1b5af47dd63..f0b2540e60e46 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( list_del(&mapping->obj_node); } - etnaviv_iommu_context_get(mmu_context); - mapping->context = mmu_context; + mapping->context = etnaviv_iommu_context_get(mmu_context); mapping->use = 1; ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 4dd7d9d541c09..486259e154aff 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_objects; submit->ctx = file->driver_priv; - etnaviv_iommu_context_get(submit->ctx->mmu); - submit->mmu_context = submit->ctx->mmu; + submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu); submit->exec_state = args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index c297fffe06eb3..cc5b07f863463 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) /* We rely on the GPU running, so program the clock */ etnaviv_gpu_update_clock(gpu); + gpu->fe_running = false; + gpu->exec_state = -1; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = NULL; + return 0; } @@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE | VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch)); } + + gpu->fe_running = true; } -static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu) +static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu, + struct etnaviv_iommu_context *context) { - u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer, - &gpu->mmu_context->cmdbuf_mapping); u16 prefetch; + u32 address; /* setup the MMU */ - etnaviv_iommu_restore(gpu, gpu->mmu_context); + etnaviv_iommu_restore(gpu, context); /* Start command processor */ prefetch = etnaviv_buffer_init(gpu); + address = etnaviv_cmdbuf_get_va(&gpu->buffer, + &gpu->mmu_context->cmdbuf_mapping); etnaviv_gpu_start_fe(gpu, address, prefetch); } @@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) /* Now program the hardware */ mutex_lock(&gpu->lock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; 
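Note on the etnaviv conversions above: they all follow one ownership pattern, where the new owner takes its reference and publishes the pointer in a single assignment, which works because etnaviv_iommu_context_get() now returns its argument (see the etnaviv_mmu.h hunk further down). A generic sketch of that shape with hypothetical names, assuming <linux/kref.h> and <linux/slab.h>; this is an illustration of the idiom, not code from the patch:

	struct ctx {
		struct kref ref;
	};

	struct owner {
		struct ctx *ctx;
	};

	static void ctx_release(struct kref *r)
	{
		kfree(container_of(r, struct ctx, ref));
	}

	static struct ctx *ctx_get(struct ctx *c)
	{
		kref_get(&c->ref);
		return c;	/* returning the argument allows get-and-assign in one step */
	}

	static void ctx_put(struct ctx *c)
	{
		kref_put(&c->ref, ctx_release);
	}

	/* the take-over shape used by the hunks above: drop the reference held
	 * through the old pointer, take one on the incoming object
	 */
	static void take_over(struct owner *o, struct ctx *newc)
	{
		if (o->ctx)
			ctx_put(o->ctx);
		o->ctx = ctx_get(newc);
	}
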
mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) spin_unlock(&gpu->event_spinlock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; - gpu->mmu_context = NULL; mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) goto out_unlock; } - if (!gpu->mmu_context) { - etnaviv_iommu_context_get(submit->mmu_context); - gpu->mmu_context = submit->mmu_context; - etnaviv_gpu_start_fe_idleloop(gpu); - } else { - etnaviv_iommu_context_get(gpu->mmu_context); - submit->prev_mmu_context = gpu->mmu_context; - } + if (!gpu->fe_running) + etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context); + + if (submit->prev_mmu_context) + etnaviv_iommu_context_put(submit->prev_mmu_context); + submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); if (submit->nr_pmrs) { gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; @@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms) static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) { - if (gpu->initialized && gpu->mmu_context) { + if (gpu->initialized && gpu->fe_running) { /* Replace the last WAIT with END */ mutex_lock(&gpu->lock); etnaviv_buffer_end(gpu); @@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) */ etnaviv_gpu_wait_idle(gpu, 100); - etnaviv_iommu_context_put(gpu->mmu_context); - gpu->mmu_context = NULL; + gpu->fe_running = false; } gpu->exec_state = -1; @@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, etnaviv_gpu_hw_suspend(gpu); #endif + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + if (gpu->initialized) { etnaviv_cmdbuf_free(&gpu->buffer); etnaviv_iommu_global_fini(gpu); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 8ea48697d1321..1c75c8ed5bcea 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -101,6 +101,7 @@ struct etnaviv_gpu { struct workqueue_struct *wq; struct drm_gpu_scheduler sched; bool initialized; + bool fe_running; /* 'ring'-buffer: */ struct etnaviv_cmdbuf buffer; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 1a7c89a67bea3..afe5dd6a9925b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu, struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); u32 pgtable; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + /* set base addresses */ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f8bf488e9d717..d664ae29ae209 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + 
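Stepping back to the smu_cmn_get_sysfs_buf() helper added earlier in this patch: sysfs_emit_at() refuses to write (with a WARN) unless buf points at the start of the PAGE_SIZE sysfs buffer, while the amdgpu pm code hands these printers a pointer already offset into that page. The helper rewinds the pointer and carries the offset in size. A minimal sketch of the effect, using a hypothetical page variable for the start of the sysfs buffer:

	char *buf = page + 128;	/* caller already emitted 128 bytes */
	int size = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);
	/* now buf == page and size == 128, so the emit below appends after
	 * the existing output instead of tripping the offset_in_page(buf)
	 * check inside sysfs_emit_at()
	 */
	size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");

This is why the conversions in the smu hunks both call the helper and switch from sysfs_emit() to size += sysfs_emit_at().
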
prefetch = etnaviv_buffer_config_mmuv2(gpu, (u32)v2_context->mtlb_dma, (u32)context->global->bad_page_dma); @@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, lower_32_bits(context->global->v2.pta_dma)); gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index dab1b58006d83..9fb1a2aadbcb0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, */ list_for_each_entry_safe(m, n, &list, scan_node) { etnaviv_iommu_remove_mapping(context, m); + etnaviv_iommu_context_put(m->context); m->context = NULL; list_del_init(&m->mmu_node); list_del_init(&m->scan_node); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index d1d6902fd13be..e4a0b7d09c2ea 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf); struct etnaviv_iommu_context * etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, struct etnaviv_cmdbuf_suballoc *suballoc); -static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) +static inline struct etnaviv_iommu_context * +etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) { kref_get(&ctx->refcount); + return ctx; } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx); void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 642a5b5a1b81c..335ba9f43d8f7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) -subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 04175f359fd68..abe3d61b62432 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == - sizeof(intel_dp->edp_dpcd)) + sizeof(intel_dp->edp_dpcd)) { drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", (int)sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); + intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; + } + /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 053a3c2f72677..508a514c5e37d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c 
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, } if (ret) - intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); + ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); if (intel_dp->set_idle_link_train) intel_dp->set_idle_link_train(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index cff72679ad7c3..9ccf4b29b82e1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref) trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + if (ctx->syncobj) + drm_syncobj_put(ctx->syncobj); + mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx) if (vm) i915_vm_close(vm); - if (ctx->syncobj) - drm_syncobj_put(ctx->syncobj); - ctx->file_priv = ERR_PTR(-EBADF); /* diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index ffae7df5e4d7d..4a6bb64c3a354 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg) err = PTR_ERR(import); goto out_dmabuf; } + import_obj = to_intel_bo(import); if (import != &obj->base) { pr_err("i915_gem_prime_import created a new object!\n"); err = -EINVAL; goto out_import; } - import_obj = to_intel_bo(import); i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); @@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", PTR_ERR(import)); err = PTR_ERR(import); + } else { + err = 0; } dma_buf_put(dmabuf); @@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, err = PTR_ERR(import); goto out_dmabuf; } + import_obj = to_intel_bo(import); if (import == &obj->base) { pr_err("i915_gem_prime_import reused gem object!\n"); @@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, goto out_import; } - import_obj = to_intel_bo(import); - i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); if (err) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b20f5621f62b1..a2c34e5a1c540 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915) return I915_MMAP_TYPE_GTT; } +static struct drm_i915_gem_object * +create_sys_or_internal(struct drm_i915_private *i915, + unsigned long size) +{ + if (HAS_LMEM(i915)) { + struct intel_memory_region *sys_region = + i915->mm.regions[INTEL_REGION_SMEM]; + + return __i915_gem_object_create_user(i915, size, &sys_region, 1); + } + + return i915_gem_object_create_internal(i915, size); +} + static bool assert_mmap_offset(struct drm_i915_private *i915, unsigned long size, int expected) @@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, u64 offset; int ret; - obj = i915_gem_object_create_internal(i915, size); + obj = create_sys_or_internal(i915, size); if (IS_ERR(obj)) return expected && 
expected == PTR_ERR(obj); @@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_mm_node *hole, *next; int loop, err = 0; u64 offset; + int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC; /* Disable background reaper */ disable_retire_worker(i915); @@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg) } /* Too large */ - if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); err = -EINVAL; goto out; } /* Fill the hole, further allocation attempts should then fail */ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = create_sys_or_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("Unable to create object for reclaimed hole\n"); @@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto err_obj; } - if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); err = -EINVAL; goto err_obj; @@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj) static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); bool no_map; - if (HAS_LMEM(i915)) + if (obj->ops->mmap_offset) return type == I915_MMAP_TYPE_FIXED; else if (type == I915_MMAP_TYPE_FIXED) return false; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index d812b27835f8c..591a5224287ea 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps) u32 intel_rps_read_punit_req(struct intel_rps *rps) { struct intel_uncore *uncore = rps_to_uncore(rps); + struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; + intel_wakeref_t wakeref; + u32 freq = 0; - return intel_uncore_read(uncore, GEN6_RPNSWREQ); + with_intel_runtime_pm_if_in_use(rpm, wakeref) + freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); + + return freq; } static u32 intel_rps_get_req(u32 pureq) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index b104fb7607eb2..86c318516e14f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc) __uc_free_load_err_log(uc); } -static inline bool guc_communication_enabled(struct intel_guc *guc) -{ - return intel_guc_ct_enabled(&guc->ct); -} - /* * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 * register using the same bits used in the CT message payload. 
Since our @@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc) static void guc_handle_mmio_msg(struct intel_guc *guc) { /* we need communication to be enabled to reply to GuC */ - GEM_BUG_ON(!guc_communication_enabled(guc)); + GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); spin_lock_irq(&guc->irq_lock); if (guc->mmio_msg) { @@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc) struct drm_i915_private *i915 = gt->i915; int ret; - GEM_BUG_ON(guc_communication_enabled(guc)); + GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); ret = i915_inject_probe_error(i915, -ENXIO); if (ret) @@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication) return 0; /* Make sure we enable communication if and only if it's disabled */ - GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); + GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); if (enable_communication) guc_enable_communication(guc); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c index b0ece71aefdee..ce774579c89d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c @@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size) args->v0.count = 0; args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; - args->v0.pwrsrc = -ENOSYS; + args->v0.pwrsrc = -ENODEV; args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0473583dcdac2..482fb0ae6cb5d 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) #endif if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) - rdev->agp = radeon_agp_head_init(rdev->ddev); + rdev->agp = radeon_agp_head_init(dev); if (rdev->agp) { rdev->agp->agp_mtrr = arch_phys_wc_add( rdev->agp->agp_info.aper_base, diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 8ab3247dbc4aa..13c6b857158fc 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev) return ret; } -static int cdn_dp_resume(struct device *dev) +static __maybe_unused int cdn_dp_resume(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index cb38b1a17b098..82cbb29a05aa3 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, else gfp_flags |= GFP_HIGHUSER; - for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages; + for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); + num_pages; order = min_t(unsigned int, order, __fls(num_pages))) { bool apply_caching = false; struct ttm_pool_type *pt; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 4a11150431140..b4b4653fe301e 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); bool connected = 
false; - WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); - if (vc4_hdmi->hpd_gpio && gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) { connected = true; @@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) } } - pm_runtime_put(&vc4_hdmi->pdev->dev); return connector_status_connected; } cec_phys_addr_invalidate(vc4_hdmi->cec_adap); - pm_runtime_put(&vc4_hdmi->pdev->dev); return connector_status_disconnected; } @@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; + struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; union hdmi_infoframe frame; int ret; @@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; - struct drm_display_mode *mode = &crtc->state->adjusted_mode; if (!vc4_hdmi_supports_scrambling(encoder, mode)) return; @@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; + struct drm_crtc *crtc = encoder->crtc; /* - * At boot, connector->state will be NULL. Since we don't know the + * At boot, encoder->crtc will be NULL. Since we don't know the * state of the scrambler and in order to avoid any * inconsistency, let's disable it all the time. 
*/ - if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode)) + if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode)) return; - if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode)) + if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode)) return; if (delayed_work_pending(&vc4_hdmi->scrambling_work)) @@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, vc4_hdmi->variant->phy_disable(vc4_hdmi); clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); ret = pm_runtime_put(&vc4_hdmi->pdev->dev); @@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, vc4_hdmi_encoder_get_connector_state(encoder, state); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); unsigned long bvb_rate, pixel_rate, hsm_rate; int ret; @@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, return; } + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + if (ret) { + DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } + vc4_hdmi_cec_update_clk_div(vc4_hdmi); if (pixel_rate > 297000000) @@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate); if (ret) { DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); return; } @@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); if (ret) { DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); return; } @@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); @@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct vc4_hdmi_encoder 
*vc4_encoder = to_vc4_hdmi_encoder(encoder); bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_crtc *crtc = connector->state->crtc; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; u32 n, cts; u64 tmp; @@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) static int vc4_hdmi_audio_startup(struct device *dev, void *data) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; /* * If the HDMI encoder hasn't probed, or the encoder is * currently in DVI mode, treat the codec dai as missing. */ - if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & + if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) return -ENODEV; @@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return 0; } -#ifdef CONFIG_PM -static int vc4_hdmi_runtime_suspend(struct device *dev) -{ - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - - clk_disable_unprepare(vc4_hdmi->hsm_clock); - - return 0; -} - -static int vc4_hdmi_runtime_resume(struct device *dev) -{ - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); - if (ret) - return ret; - - return 0; -} -#endif - static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); @@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = { {} }; -static const struct dev_pm_ops vc4_hdmi_pm_ops = { - SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend, - vc4_hdmi_runtime_resume, - NULL) -}; - struct platform_driver vc4_hdmi_driver = { .probe = vc4_hdmi_dev_probe, .remove = vc4_hdmi_dev_remove, .driver = { .name = "vc4_hdmi", .of_match_table = vc4_hdmi_dt_match, - .pm = &vc4_hdmi_pm_ops, }, }; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 2aee356840a2b..314015d9e912d 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) mutex_unlock(&ring_info->ring_buffer_mutex); kfree(ring_info->pkt_buffer); + ring_info->pkt_buffer = NULL; ring_info->pkt_buffer_size = 0; } diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 94fb63a7b3573..fe63d5ee201b8 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -570,7 +570,7 @@ int __init smu_init (void) fail_db_node: of_node_put(smu->db_node); fail_bootmem: - memblock_free(__pa(smu), sizeof(struct smu_device)); + memblock_free_ptr(smu, sizeof(struct smu_device)); smu = NULL; fail_np: of_node_put(np); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 64d6dfa831220..267324889dd64 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph reset_control_assert(gphy_fw->reset); + /* The vendor BSP uses a 200ms delay after asserting the reset line. 
+ * Without this some users are observing that the PHY is not coming up + * on the MDIO bus. + */ + msleep(200); + ret = request_firmware(&fw, gphy_fw->fw_name, dev); if (ret) { dev_err(dev, "failed to load firmware: %s, error: %i\n", diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 1f63f50f73f17..bda5a9bf4f529 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) } static int -qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) +qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -682,10 +680,8 @@ qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) } static int -qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) +qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -726,6 +722,24 @@ qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) return ret; } +static int +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_write(bus, phy, regnum, data); +} + +static int +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_read(bus, phy, regnum); +} + static int qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) { @@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) bus->priv = (void *)priv; bus->name = "qca8k slave mii"; - bus->read = qca8k_mdio_read; - bus->write = qca8k_mdio_write; + bus->read = qca8k_internal_mdio_read; + bus->write = qca8k_internal_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 8d90fed5d33eb..6f0ea2facea9e 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, #ifdef VORTEX_BUS_MASTER if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. 
*/ - outl((int) (skb->data), ioaddr + Wn7_MasterAddr); + outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr); outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); vp->tx_skb = skb; outw(StartDMADown, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 53660bc8d6ff6..9afc712f59484 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -922,13 +922,16 @@ static void __init ne_add_devices(void) } } -#ifdef MODULE static int __init ne_init(void) { int retval; - ne_add_devices(); + + if (IS_MODULE(CONFIG_NE2000)) + ne_add_devices(); + retval = platform_driver_probe(&ne_driver, ne_drv_probe); - if (retval) { + + if (IS_MODULE(CONFIG_NE2000) && retval) { if (io[0] == 0) pr_notice("ne.c: You must supply \"io=0xNNN\"" " value(s) for ISA cards.\n"); @@ -941,18 +944,8 @@ static int __init ne_init(void) return retval; } module_init(ne_init); -#else /* MODULE */ -static int __init ne_init(void) -{ - int retval = platform_driver_probe(&ne_driver, ne_drv_probe); - - /* Unregister unused platform_devices. */ - ne_loop_rm_unreg(0); - return retval; -} -module_init(ne_init); -#ifdef CONFIG_NETDEV_LEGACY_INIT +#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT) struct net_device * __init ne_probe(int unit) { int this_dev; @@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit) return ERR_PTR(-ENODEV); } #endif -#endif /* MODULE */ static void __exit ne_exit(void) { diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index b5df7ad5a83f1..032e8922b4829 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p) #ifdef XMT_VIA_SKB skb_save[i] = p->tmd_skb[i]; #endif - buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); + buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer); blen[i] = tmdp->blen; tmdp->u.s.status = 0x0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f255fd0b16dbd..6fbf735fca31c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* SR-IOV capability was enabled but there are no VFs*/ if (iov->total == 0) { - err = -EINVAL; + err = 0; goto failed; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ea0c45d33814d..037767b370d54 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp, DIV_ROUND_UP(fw_health->polling_dsecs * HZ, bp->current_interval * 10); fw_health->tmr_counter = fw_health->tmr_multiplier; - if (!fw_health->enabled) { + if (!fw_health->enabled) fw_health->last_fw_heartbeat = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); - fw_health->last_fw_reset_cnt = - bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - } + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); netif_info(bp, drv, bp->dev, "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", fw_health->master, fw_health->last_fw_reset_cnt, @@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; + if (!txr->tx_buf_ring) + continue; + for (j = 0; j < max_idx;) { 
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb; @@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) } skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; dma_addr_t mapping = rx_buf->mapping; @@ -2836,6 +2841,11 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) kfree(data); } } + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + for (i = 0; i < max_agg_idx; i++) { struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct page *page = rx_agg_buf->page; @@ -2852,6 +2862,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) __free_page(page); } + +skip_rx_agg_free: if (rxr->rx_page) { __free_page(rxr->rx_page); rxr->rx_page = NULL; @@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) struct pci_dev *pdev = bp->pdev; int i; + if (!rmem->pg_arr) + goto skip_pages; + for (i = 0; i < rmem->nr_pages; i++) { if (!rmem->pg_arr[i]) continue; @@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) rmem->pg_arr[i] = NULL; } +skip_pages: if (rmem->pg_tbl) { size_t pg_tbl_size = rmem->nr_pages * 8; @@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) { + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + kfree(cpr->cp_desc_ring); cpr->cp_desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; kfree(cpr->cp_desc_mapping); cpr->cp_desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; } static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) @@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) return; } + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && + bp->fw_health->enabled) { + bp->fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + } bp->fw_reset_state = 0; /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 46fae1acbeed6..e6a4a768b10b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) { struct bnxt_flower_indr_block_cb_priv *cb_priv; - /* All callback list access should be protected by RTNL. 
*/ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) if (cb_priv->tunnel_netdev == netdev) return cb_priv; diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 8b7b59908a1ab..f66d22de5168d 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev) struct platform_device *plat_dev = pci_get_drvdata(pdev); struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); - platform_device_unregister(plat_dev); clk_unregister(plat_data->pclk); clk_unregister(plat_data->hclk); + platform_device_unregister(plat_dev); } static const struct pci_device_id dev_id_table[] = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 22af3d6ce1783..adc54a7266612 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1; module_param(tx_sgl, uint, 0600); MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0400); + #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ sizeof(struct sg_table)) #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ @@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt #define HNS3_OUTER_VLAN_TAG 2 #define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U /* hns3_pci_tbl - PCI Device ID Table * @@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, l4.tcp->doff); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - return skb_checksum_help(skb); + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? 
ret : skb_checksum_help(skb); + } hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, @@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { - hns3_alloc_page_pool(ring); + if (page_pool_enabled) + hns3_alloc_page_pool(ring); ret = hns3_alloc_ring_buffers(ring); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 68ed1715ac528..87d96f82c3182 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) } bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc_src) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e55ba2e511b1a..f1e46ba799f9d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; unsigned int i; - int ret; + int node, ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_kdump_kernel_config(hdev); - /* Set the init affinity based on pci func number */ - i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); - i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; - cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), - &hdev->affinity_mask); + /* Set the affinity based on numa node */ + node = dev_to_node(&hdev->pdev->dev); + if (node != NUMA_NO_NODE) + cpumask = cpumask_of_node(node); + + cpumask_copy(&hdev->affinity_mask, cpumask); return ret; } @@ -8125,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle) hclge_clear_arfs_rules(hdev); spin_unlock_bh(&hdev->fd_rule_lock); - /* If it is not PF reset, the firmware will disable the MAC, + /* If it is not PF reset or FLR, the firmware will disable the MAC, * so it only need to stop phy here. 
*/ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && - hdev->reset_type != HNAE3_FUNC_RESET) { + hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { hclge_mac_stop_phy(hdev); hclge_update_link_status(hdev); return; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 82e7270201203..a69e892277b31 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) hclgevf_enable_vector(&hdev->misc_vector, false); event_cause = hclgevf_check_evt_cause(hdev, &clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_clear_event_cause(hdev, clearval); switch (event_cause) { case HCLGEVF_VECTOR0_EVENT_RST: @@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) break; } - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { - hclgevf_clear_event_cause(hdev, clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) hclgevf_enable_vector(&hdev->misc_vector, true); - } return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index b8a40146b8958..b482f6f633bd5 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void) err = -ENODEV; goto out; } - memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */ + memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */ dev->base_addr = MVME_I596_BASE; dev->irq = (unsigned) MVME16x_IRQ_I596; goto found; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a775c69e4fd7f..a4579b3401204 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + if (adapter->failover_pending) { + adapter->init_done_rc = -EAGAIN; + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); + complete(&adapter->init_done); + /* login response buffer will be released on reset */ + return 0; + } + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index eadcb99583464..3c4f08d20414e 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) { if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); ice_plug_aux_dev(pf); } } @@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { ice_unplug_aux_dev(pf); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 1f2afdf6cd483..adcc9a251595a 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ 
b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf) struct auxiliary_device *adev; int ret; + /* if this PF doesn't support a technology that requires auxiliary + * devices, then gracefully exit + */ + if (!ice_is_aux_ena(pf)) + return 0; + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b877efae61dfa..0e19b4d02e628 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= netdev->features; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e84287ffc7cea..dcf9f27ba2efd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param = static int mlx5_devlink_rdma_param_register(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); union devlink_param_value value; int err; - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return 0; err = devlink_param_register(devlink, &enable_rdma_param); @@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; devlink_param_unpublish(devlink, &enable_rdma_param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 3f8a98093f8cb..f9cf9fb315479 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); if (err) { mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); - return err; + goto err_cancel_work; } err = mlx5_fw_tracer_create_mkey(tracer); @@ -1031,6 +1031,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); +err_cancel_work: cancel_work_sync(&tracer->read_fw_strings_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 669a75f3537a6..7b8c8187543a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool 
rx_filter); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 0c38c2e319be9..b5ddaa82755f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr u16 vport_num, esw_owner_vhca_id; struct netlink_ext_ack *extack; int ifindex = upper->ifindex; - int err; + int err = 0; if (!netif_is_bridge_master(upper)) return 0; @@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info); const struct switchdev_attr *attr = port_attr_info->attr; u16 vport_num, esw_owner_vhca_id; - int err; + int err = 0; if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, &esw_owner_vhca_id)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 51a4d80f7fa34..de03684528bbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, { struct mlx5e_rep_indr_block_priv *cb_priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &rpriv->uplink_priv.tc_indr_block_priv_list, list) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index bf0313e2682ba..13056cb9757d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann if (res->features & MLX5E_RX_RES_FEATURE_PTP) { u32 rqn; - if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) + if (!mlx5e_channels_get_ptp_rqn(chs, &rqn)) rqn = res->drop_rqn; err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2cfd12953909b..306fb5d6a36d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) return set_pflag_cqe_based_moder(netdev, enable, true); } -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); struct mlx5e_params new_params; @@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - if (new_val && !priv->profile->rx_ptp_support && - priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { + if (new_val && !priv->profile->rx_ptp_support && rx_filter) { netdev_err(priv->netdev, "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n"); return -EINVAL; @@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val new_params = priv->channels.params; 
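For context on the rx_filter parameter threaded through these mlx5 hunks: the hwtstamp configuration path calls this function while it is itself updating priv->tstamp, so the helper no longer decides by reading priv->tstamp.rx_filter and instead receives a snapshot from the caller. A condensed sketch of that shape, with hypothetical names standing in for the mlx5 structures; illustration only, not code from the patch:

	struct prv {
		int rx_filter;	/* stands in for priv->tstamp.rx_filter */
	};

	static int modify_locked(struct prv *p, bool new_val, bool rx_filter)
	{
		/* decide from the snapshot, not from p->rx_filter, which a
		 * caller may be rewriting around this call
		 */
		if (new_val && rx_filter)
			return -EINVAL;
		return 0;
	}

	static int caller(struct prv *p, bool enable)
	{
		bool rx_filter = p->rx_filter != 0;	/* snapshot once */

		return modify_locked(p, enable, rx_filter);
	}
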
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) + if (rx_filter) new_params.ptp_rx = new_val; if (new_params.ptp_rx == priv->channels.params.ptp_rx) @@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool rx_filter; int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47efd858964d6..3fd515e7bf302 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte if (!rx_filter) /* Reset CQE compression to Admin default */ - return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def); + return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false); if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) return 0; /* Disable CQE compression */ netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true); if (err) netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9fe8e3c204d63..fe501ba88bea9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { + rcu_read_unlock(); free_match_list(match_head, ft_locked); - err = -ENOMEM; - goto out; + return -ENOMEM; } curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } -out: rcu_read_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 49ca57c6d31dc..ca5690b0a7abb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) struct mlx5_core_dev *dev1; struct mlx5_lag *ldev; + ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + mlx5_dev_list_lock(); - ldev = mlx5_lag_dev(dev); dev0 = ldev->pf[MLX5_LAG_P1].dev; dev1 = ldev->pf[MLX5_LAG_P2].dev; @@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; - mlx5_dev_list_lock(); ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + + mlx5_dev_list_lock(); ldev->mode_changes_in_progress--; mlx5_dev_list_unlock(); mlx5_queue_bond_work(ldev, 0); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 3e85b17f58573..6704f5c1aa32e 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device 
*netdev) err = mlxbf_gige_clean_port(priv); if (err) goto free_irqs; + + /* Clear driver's valid_polarity to match hardware, + * since the above call to clean_port() resets the + * receive polarity used by hardware. + */ + priv->valid_polarity = 0; + err = mlxbf_gige_rx_init(priv); if (err) goto free_irqs; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index c1310ea1c216f..d5c485a6d2845 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, int err; u16 i; - dma_buf = kzalloc(sizeof(*dma_buf) + - q_depth * sizeof(struct hwc_work_request), - GFP_KERNEL); + dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL); if (!dma_buf) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 556c3495211da..64c0ef57ad426 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, struct nfp_flower_indr_block_cb_priv *cb_priv; struct nfp_flower_priv *priv = app->priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) if (cb_priv->netdev == netdev) return cb_priv; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6e5a6cc97d0eb..24cd415677756 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; + int rc; u32 i; /* Translate image_id into MFW definitions */ @@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - qed_mcp_nvm_info_populate(p_hwfn); + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 0a2f34fc8b24a..27dffa299ca6f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; u32 dest, *p_cache, *temp; - int i, ret = -EIO; __le32 *temp_le; u8 data[16]; size_t size; + int i, ret; u64 addr; temp = vzalloc(fw->size); diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 4b2eca5e08e29..01ef5efd7bc2a 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -119,6 +119,8 @@ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ +#define MD_CSC 0xb6 /* MDC speed control register */ +#define MD_CSC_DEFAULT 0x0030 #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp) { void __iomem *ioaddr = lp->base; int limit = 
MAC_DEF_TIMEOUT; - u16 cmd; + u16 cmd, md_csc; + md_csc = ioread16(ioaddr + MD_CSC); iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); @@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp) iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); + + /* Restore MDIO clock frequency */ + if (md_csc != MD_CSC_DEFAULT) + iowrite16(md_csc, ioaddr + MD_CSC); } static void r6040_init_mac_regs(struct net_device *dev) diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index e5b0d795c3012..3dbea028b325c 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * We need a channel per event queue, plus a VI per tx queue. * This may be more pessimistic than it needs to be. */ - if (n_channels + n_xdp_ev > max_channels) { - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", - n_xdp_ev, n_channels, max_channels); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + if (n_channels >= max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); } else if (n_channels + n_xdp_tx > efx->max_vis) { - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", - n_xdp_tx, n_channels, efx->max_vis); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_ev > max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + + n_xdp_ev = max_channels - n_channels; + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); } else { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; + } + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { efx->n_xdp_channels = n_xdp_ev; efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, "Allocating %d TX and %d event queues for XDP\n", - n_xdp_tx, n_xdp_ev); + n_xdp_ev * tx_per_ev, n_xdp_ev); + } else { + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = n_xdp_tx; } if (vec_count < n_channels) { @@ -858,6 +872,20 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) goto out; } +static inline int 
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if (xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + int efx_set_channels(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; @@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - - /* We may have a few left-over XDP TX - * queues owing to xdp_tx_queue_count - * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. - * We still allocate and probe those - * TXQs, but never use them. - */ - if (xdp_queue_number < efx->xdp_tx_queue_count) { - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) xdp_queue_number++; - } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx) channel->channel, tx_queue->label, tx_queue->queue); } + + /* If XDP is borrowing queues from net stack, it must use the queue + * with no csum offload, which is the first one of the channel + * (note: channel->tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } } } } - WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9b4b257042710..f6981810039d0 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -782,6 +782,12 @@ struct efx_async_filter_insertion { #define EFX_RPS_MAX_IN_FLIGHT 8 #endif /* CONFIG_RFS_ACCEL */ +enum efx_xdp_tx_queues_mode { + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ +}; + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -820,6 +826,7 @@ struct efx_async_filter_insertion { * should be allocated for this NIC * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. 
* @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. * @txq_stop_thresh: TX queue fill level at or above which we stop it. @@ -979,6 +986,7 @@ struct efx_nic { unsigned int xdp_tx_queue_count; struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; unsigned rxq_entries; unsigned txq_entries; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 0c6650d2e2399..d16e031e95f44 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, unsigned int len; int space; int cpu; - int i; + int i = 0; - cpu = raw_smp_processor_id(); + if (unlikely(n && !xdpfs)) + return -EINVAL; + if (unlikely(!n)) + return 0; - if (!efx->xdp_tx_queue_count || - unlikely(cpu >= efx->xdp_tx_queue_count)) + cpu = raw_smp_processor_id(); + if (unlikely(cpu >= efx->xdp_tx_queue_count)) return -EINVAL; tx_queue = efx->xdp_tx_queues[cpu]; if (unlikely(!tx_queue)) return -EINVAL; - if (unlikely(n && !xdpfs)) - return -EINVAL; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); - if (!n) - return 0; + /* If we're borrowing net stack queues we have to handle stop-restart + * or we might block the queue and it will be considered as frozen + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + if (netif_tx_queue_stopped(tx_queue->core_txq)) + goto unlock; + efx_tx_maybe_stop_queue(tx_queue); + } /* Check for available space. We should never need multiple * descriptors per frame. @@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (flush && i > 0) efx_nic_push_buffers(tx_queue); +unlock: + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); + return i == 0 ? 
-EIO : i; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ece02b35a6cec..553c4403258aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } @@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; - int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev) } else { stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); - /* Disable clock in case of PWM is off */ - clk_disable_unprepare(priv->plat->clk_ptp_ref); - ret = pm_runtime_force_suspend(dev); - if (ret) { - mutex_unlock(&priv->lock); - return ret; - } } mutex_unlock(&priv->lock); @@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); - /* enable the clk previously disabled */ - ret = pm_runtime_force_resume(dev); - if (ret) - return ret; - if (priv->plat->clk_ptp_ref) - clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5ca710844cc1e..62cec9bfcd337 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include +#include #include #include #include @@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev) return stmmac_bus_clks_config(priv, true); } +static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* Disable clock in case of PWM is off */ + clk_disable_unprepare(priv->plat->clk_ptp_ref); + + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + } + + return 0; +} + +static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* enable the clk previously disabled */ + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + clk_prepare_enable(priv->plat->clk_ptp_ref); + } + + return 0; +} + const struct dev_pm_ops stmmac_pltfr_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume) }; EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); diff --git a/drivers/net/hamradio/6pack.c 
b/drivers/net/hamradio/6pack.c index 8fe8887d506a3..6192244b304ab 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -68,9 +68,9 @@ #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ -#define SIXP_TXDELAY (HZ/4) /* in 1 s */ +#define SIXP_TXDELAY 25 /* 250 ms */ #define SIXP_PERSIST 50 /* in 256ths */ -#define SIXP_SLOTTIME (HZ/10) /* in 1 s */ +#define SIXP_SLOTTIME 10 /* 100 ms */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index b50b7fafd8d6c..f4c3efc3e0745 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv) flags = claim_dma_lock(); set_dma_mode(priv->param.dma, DMA_MODE_WRITE); set_dma_addr(priv->param.dma, - (int) priv->tx_buf[priv->tx_tail] + n); + virt_to_bus(priv->tx_buf[priv->tx_tail]) + n); set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail] - n); release_dma_lock(flags); @@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv) flags = claim_dma_lock(); set_dma_mode(priv->param.dma, DMA_MODE_READ); set_dma_addr(priv->param.dma, - (int) priv->rx_buf[priv->rx_head]); + virt_to_bus(priv->rx_buf[priv->rx_head])); set_dma_count(priv->param.dma, BUF_SIZE); release_dma_lock(flags); enable_dma(priv->param.dma); @@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc) if (priv->param.dma >= 0) { flags = claim_dma_lock(); set_dma_addr(priv->param.dma, - (int) priv->rx_buf[priv->rx_head]); + virt_to_bus(priv->rx_buf[priv->rx_head])); set_dma_count(priv->param.dma, BUF_SIZE); release_dma_lock(flags); } else { diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 2324e1b93e37d..1da334f54944a 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, * table region determines the number of entries it has. */ if (filter) { - count = hweight32(ipa->filter_map); + /* Include one extra "slot" to hold the filter map itself */ + count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { count = mem->size / sizeof(__le64); diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h index 21aa24c741b96..daae7fa58fb82 100644 --- a/drivers/net/phy/dp83640_reg.h +++ b/drivers/net/phy/dp83640_reg.h @@ -5,7 +5,7 @@ #ifndef HAVE_DP83640_REGISTERS #define HAVE_DP83640_REGISTERS -#define PAGE0 0x0000 +/* #define PAGE0 0x0000 */ #define PHYCR2 0x001c /* PHY Control Register 2 */ #define PAGE4 0x0004 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9e2891d8e8dd9..ba5ad86ec8261 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock); static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { + struct device_driver *drv = phydev->mdio.dev.driver; + struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; - if (!phydev->drv->suspend) + if (!drv || !phydrv->suspend) return false; /* PHY not attached? 
May suspend if the PHY has not already been diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a1464b764d4dc..0a0abe8e4be0b 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) return -EINVAL; + /* If this link is with an SFP, ensure that changes to advertised modes + * also cause the associated interface to be selected such that the + * link can be configured correctly. + */ + if (pl->sfp_port && pl->sfp_bus) { + config.interface = sfp_select_interface(pl->sfp_bus, + config.advertising); + if (config.interface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, + "selection of interface failed, advertisement %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + config.advertising); + return -EINVAL; + } + + /* Revalidate with the selected interface */ + linkmode_copy(support, pl->supported); + if (phylink_validate(pl, support, &config)) { + phylink_err(pl, "validation of %s/%s with support %*pb failed\n", + phylink_an_mode_str(pl->cur_link_an_mode), + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + return -EINVAL; + } + } + mutex_lock(&pl->state_mutex); pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; @@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, if (phy_interface_mode_is_8023z(iface) && pl->phydev) return -EINVAL; - changed = !linkmode_equal(pl->supported, support); + changed = !linkmode_equal(pl->supported, support) || + !linkmode_equal(pl->link_config.advertising, + config.advertising); if (changed) { linkmode_copy(pl->supported, support); linkmode_copy(pl->link_config.advertising, config.advertising); diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index f6b92efffc94c..480bcd1f6c1cb 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o clean-files := wanxlfw.inc $(obj)/wanxl.o: $(obj)/wanxlfw.inc +CROSS_COMPILE_M68K = m68k-linux-gnu- + ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) M68KCC = $(CC) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7efb31b87f37a..6600e138945e2 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { - if (h->ns_id == nsid && nvme_tryget_ns_head(h)) + if (h->ns_id != nsid) + continue; + if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; } @@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) { + list_del_init(&ns->head->entry); + last_path = true; + } mutex_unlock(&ns->ctrl->subsys->lock); /* guarantee not available in head->list */ @@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); blk_cleanup_queue(ns->queue); - if (blk_get_integrity(ns->disk)) - blk_integrity_unregister(ns->disk); down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); up_write(&ns->ctrl->namespaces_rwsem); - /* Synchronize with nvme_init_ns_head() */ - mutex_lock(&ns->head->subsys->lock); - if (list_empty(&ns->head->list)) { - list_del_init(&ns->head->entry); - last_path = true; - } 
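The nvme_ns_remove() rework above pairs with the nvme_find_ns_head() change earlier in this file: the last-sibling check now happens under subsys->lock at the moment the namespace is unlinked, and lookups skip any head whose sibling list is already empty, so a concurrent scan can no longer grab a head that is mid-teardown. A compact userspace analogue of that idiom, with hypothetical names and a plain mutex standing in for subsys->lock:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct head {
    struct head *next;  /* linkage on the subsystem's list of heads */
    int nsid;
    int members;        /* live sibling count, protected by the lock */
};

static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;
static struct head *heads;

/* Lookup only returns a head that still has members, mirroring the
 * !list_empty(&h->list) test added to nvme_find_ns_head(). */
static struct head *find_head(int nsid)
{
    struct head *h;

    pthread_mutex_lock(&subsys_lock);
    for (h = heads; h; h = h->next) {
        if (h->nsid == nsid && h->members > 0)
            break;
    }
    pthread_mutex_unlock(&subsys_lock);
    return h;
}

/* Removal drops the membership and unlinks an emptied head in the same
 * critical section, as nvme_ns_remove() now does before releasing
 * subsys->lock; the head itself is torn down outside the lock. */
static bool remove_member(struct head *h)
{
    struct head **pp;
    bool last;

    pthread_mutex_lock(&subsys_lock);
    last = (--h->members == 0);
    if (last) {
        for (pp = &heads; *pp; pp = &(*pp)->next) {
            if (*pp == h) {
                *pp = h->next;
                break;
            }
        }
    }
    pthread_mutex_unlock(&subsys_lock);
    return last;
}

int main(void)
{
    struct head h = { .next = NULL, .nsid = 1, .members = 1 };

    heads = &h;
    assert(find_head(1) == &h);   /* findable while a member exists */
    assert(remove_member(&h));    /* last member: head gets unlinked */
    assert(find_head(1) == NULL); /* gone for any later lookup */
    return 0;
}

The point of the ordering is that emptiness and unlinking are observed atomically with respect to the lock, so there is no window in which a lookup can see an empty but still-linked head.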
- mutex_unlock(&ns->head->subsys->lock); if (last_path) nvme_mpath_shutdown_disk(ns->head); nvme_put_ns(ns); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5d7bc58a27bd9..e8ccdd398f784 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { - unsigned nsid = le32_to_cpu(desc->nsids[n]); - + unsigned nsid; +again: + nsid = le32_to_cpu(desc->nsids[n]); if (ns->head->ns_id < nsid) continue; if (ns->head->ns_id == nsid) nvme_update_ns_ana_state(desc, ns); if (++n == nr_nsids) break; + if (ns->head->ns_id > nsid) + goto again; } up_read(&ctrl->namespaces_rwsem); return 0; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index a68704e39084e..042c594bc57e2 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) return; - nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); mutex_destroy(&queue->queue_lock); } @@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) for (i = 0; i < queue->queue_size; i++) { ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); if (ret) - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, @@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) if (ret) { dev_err(ctrl->ctrl.device, "rdma_connect_locked failed (%d).\n", ret); - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, @@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: - nvme_rdma_destroy_queue_ib(queue); - fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index e2ab12f3f51c9..e4249b7dc0568 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) } while (ret > 0); } +static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) +{ + return !list_empty(&queue->send_list) || + !llist_empty(&queue->req_list) || queue->more_requests; +} + static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, bool sync, bool last) { @@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, nvme_tcp_send_all(queue); queue->more_requests = false; mutex_unlock(&queue->send_mutex); - } else if (last) { - queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } + + if (last && nvme_tcp_queue_more(queue)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) @@ -906,12 +913,6 @@ static void nvme_tcp_state_change(struct sock *sk) read_unlock_bh(&sk->sk_callback_lock); } -static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) -{ - return 
!list_empty(&queue->send_list) || - !llist_empty(&queue->req_list) || queue->more_requests; -} - static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) { queue->request = NULL; @@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w) pending = true; else if (unlikely(result < 0)) break; - } else - pending = !llist_empty(&queue->req_list); + } result = nvme_tcp_try_recv(queue); if (result > 0) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index d784f3c200b4c..be5d82421e3a4 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, { struct nvmet_subsys *subsys = to_subsys(item); - return snprintf(page, PAGE_SIZE, "%*s\n", + return snprintf(page, PAGE_SIZE, "%.*s\n", NVMET_SN_MAX_SIZE, subsys->serial); } diff --git a/drivers/of/device.c b/drivers/of/device.c index 5b043ee308240..b0800c260f64a 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np) break; } - if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) + /* + * Attempt to initialize a restricted-dma-pool region if one was found. + * Note that count can hold a negative error code. + */ + if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); } diff --git a/drivers/of/property.c b/drivers/of/property.c index 3fd74bb34819b..a3483484a5a2a 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) -DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL) DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") @@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_resets, }, { .parse_prop = parse_leds, }, { .parse_prop = parse_backlight, }, - { .parse_prop = parse_phy_handle, }, { .parse_prop = parse_gpio_compat, }, { .parse_prop = parse_interrupts, }, { .parse_prop = parse_regulators, }, diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a1b1e2a016329..0f40943a9a18a 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev); void pci_set_acpi_fwnode(struct pci_dev *dev) { - if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev)) + if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev)) ACPI_COMPANION_SET(&dev->dev, acpi_pci_find_companion(&dev->dev)); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e5089af8ad90a..4537d1ea14fdc 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); /* - * Create device link for NVIDIA GPU with integrated USB xHCI Host + * Create device link for GPUs with integrated USB xHCI Host * controller to VGA. 
*/ static void quirk_gpu_usb(struct pci_dev *usb) @@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); /* - * Create device link for NVIDIA GPU with integrated Type-C UCSI controller + * Create device link for GPUs with integrated Type-C UCSI controller * to VGA. Currently there is no class code defined for UCSI device over PCI * so using UNKNOWN class for now and it will be updated when UCSI * over PCI gets a class code. @@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN, 8, quirk_gpu_usb_typec_ucsi); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_UNKNOWN, 8, + quirk_gpu_usb_typec_ucsi); /* * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 25557b272a4f9..4be24890132ed 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -99,6 +99,24 @@ static size_t pci_vpd_size(struct pci_dev *dev) return off ?: PCI_VPD_SZ_INVALID; } +static bool pci_vpd_available(struct pci_dev *dev) +{ + struct pci_vpd *vpd = &dev->vpd; + + if (!vpd->cap) + return false; + + if (vpd->len == 0) { + vpd->len = pci_vpd_size(dev); + if (vpd->len == PCI_VPD_SZ_INVALID) { + vpd->cap = 0; + return false; + } + } + + return true; +} + /* * Wait for last operation to complete. * This code has to spin since there is no other notification from the PCI @@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count, loff_t end = pos + count; u8 *buf = arg; - if (!vpd->cap) + if (!pci_vpd_available(dev)) return -ENODEV; if (pos < 0) @@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, loff_t end = pos + count; int ret = 0; - if (!vpd->cap) + if (!pci_vpd_available(dev)) return -ENODEV; if (pos < 0 || (pos & 3) || (count & 3)) @@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, void pci_vpd_init(struct pci_dev *dev) { + if (dev->vpd.len == PCI_VPD_SZ_INVALID) + return; + dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD); mutex_init(&dev->vpd.lock); - - if (!dev->vpd.len) - dev->vpd.len = pci_vpd_size(dev); - - if (dev->vpd.len == PCI_VPD_SZ_INVALID) - dev->vpd.cap = 0; } static ssize_t vpd_read(struct file *filp, struct kobject *kobj, @@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = { void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size) { - unsigned int len = dev->vpd.len; + unsigned int len; void *buf; int cnt; - if (!dev->vpd.cap) + if (!pci_vpd_available(dev)) return ERR_PTR(-ENODEV); + len = dev->vpd.len; buf = kmalloc(len, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c index 1d78b455cc48c..e34face736f48 100644 --- a/drivers/regulator/max14577-regulator.c +++ b/drivers/regulator/max14577-regulator.c @@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit); MODULE_AUTHOR("Krzysztof Kozlowski "); MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:max14577-regulator"); -MODULE_ALIAS("platform:max77836-regulator"); diff --git a/drivers/regulator/qcom-rpmh-regulator.c 
b/drivers/regulator/qcom-rpmh-regulator.c index 6cca910a76ded..7f458d510483f 100644 --- a/drivers/regulator/qcom-rpmh-regulator.c +++ b/drivers/regulator/qcom-rpmh-regulator.c @@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = { RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"), RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"), RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"), - RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"), + RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"), {} }; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index eb15067a605ef..4eb53412b8085 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1047,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { + local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); + local_irq_enable(); return; } diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 2f3515fa242a3..f3d5c7f4c13d2 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void) sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); - sclp.has_sipl = !!(sccb->cbl & 0x4000); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; if (sccb->cpuoff > 134) sclp.has_diag318 = !!(sccb->byte_134 & 0x80); + if (sccb->cpuoff > 137) + sclp.has_sipl = !!(sccb->cbl & 0x4000); sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; sclp.rzm <<= 20; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f433428057d9c..d9b804943d192 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info) * ap_init_qci_info(): Allocate and query qci config info. * Does also update the static variables ap_max_domain_id * and ap_max_adapter_id if this info is available. - */ static void __init ap_init_qci_info(void) { @@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) /** * ap_interrupt_handler() - Schedule ap_tasklet on interrupt * @airq: pointer to adapter interrupt descriptor + * @floating: ignored */ static void ap_interrupt_handler(struct airq_struct *airq, bool floating) { @@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap) /** * ap_scan_bus(): Scan the AP bus for new devices * Runs periodically, workqueue timer (ap_config_time) + * @unused: Unused pointer. */ static void ap_scan_bus(struct work_struct *unused) { diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index d70c4d3d0907f..9ea48bf0ee40d 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq); /** * ap_queue_enable_irq(): Enable interrupt support on this AP queue. - * @qid: The AP queue number + * @aq: The AP queue * @ind: the notification indicator byte * * Enables interruption on AP queue via ap_aqic(). Based on the return @@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq) /** * ap_sm_reset(): Reset an AP queue. 
- * @qid: The AP queue number + * @aq: The AP queue * * Submit the Reset command to an AP queue. */ diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 540861ca2ba37..553b6b9d02222 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one( int ret; bool use_dma; + /* Zero length transfers won't trigger an interrupt on completion */ + if (!xfer->len) { + spi_finalize_current_transfer(ctlr); + return 1; + } + WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index ebd27f8830331..8ce840c7ecc82 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -204,9 +204,6 @@ struct tegra_slink_data { struct dma_async_tx_descriptor *tx_dma_desc; }; -static int tegra_slink_runtime_suspend(struct device *dev); -static int tegra_slink_runtime_resume(struct device *dev); - static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi, unsigned long reg) { @@ -1185,6 +1182,7 @@ static int tegra_slink_resume(struct device *dev) } #endif +#ifdef CONFIG_PM static int tegra_slink_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); @@ -1210,6 +1208,7 @@ static int tegra_slink_runtime_resume(struct device *dev) } return 0; } +#endif /* CONFIG_PM */ static const struct dev_pm_ops slink_pm_ops = { SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend, diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 3a249ee7e1440..28ef323882fb2 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net, .num = nvq->batched_xdp, .ptr = nvq->xdp, }; - int err; + int i, err; if (nvq->batched_xdp == 0) goto signal_used; @@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net, err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); + + /* free pages owned by XDP; since this is an unlikely error path, + * keep it simple and avoid more complex bulk update for the + * used pages + */ + for (i = 0; i < nvq->batched_xdp; ++i) + put_page(virt_to_head_page(nvq->xdp[i].data)); + nvq->batched_xdp = 0; + nvq->done_idx = 0; return; } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index d33c5cd684c0b..b26b79dfcac92 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -582,7 +582,9 @@ config FB_HP300 config FB_TGA tristate "TGA/SFB+ framebuffer support" - depends on FB && (ALPHA || TC) + depends on FB + depends on PCI || TC + depends on ALPHA || TC select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5f1ce59b44b95..a37eb52fb4010 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND implements them. 
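Context for the Kconfig change just below: turning XEN_PVCALLS_BACKEND from bool into tristate allows =m builds, where the driver is linked as a loadable .ko instead of into vmlinux. That presumes the driver carries the usual module entry points; a generic skeleton of that pattern (not the pvcalls code itself, names are illustrative):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_backend_init(void)
{
    pr_info("example backend loaded\n");
    return 0;    /* a negative errno here would fail insmod */
}

static void __exit example_backend_exit(void)
{
    pr_info("example backend unloaded\n");
}

module_init(example_backend_init);
module_exit(example_backend_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Minimal skeleton for a tristate-buildable driver");

With tristate, =y keeps the old built-in behaviour, while =m is exactly what the bool option was forbidding.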
config XEN_PVCALLS_BACKEND - bool "XEN PV Calls backend driver" + tristate "XEN PV Calls backend driver" depends on INET && XEN && XEN_BACKEND help Experimental backend for the Xen PV Calls protocol diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 671c71245a7b2..2d2803883306d 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include #include @@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = { #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) /* - * balloon_process() state: + * balloon_thread() state: * * BP_DONE: done or nothing to do, * BP_WAIT: wait to be rescheduled, @@ -130,6 +132,8 @@ enum bp_state { BP_ECANCELED }; +/* Main waiting point for xen-balloon thread. */ +static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq); static DEFINE_MUTEX(balloon_mutex); @@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)]; static LIST_HEAD(ballooned_pages); static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); -/* Main work function, always executed in process context. */ -static void balloon_process(struct work_struct *work); -static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); - /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ @@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order) static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) { if (val == MEM_ONLINE) - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); return NOTIFY_OK; } @@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) } /* - * As this is a work item it is guaranteed to run as a single instance only. + * Stop waiting if either state is not BP_EAGAIN and ballooning action is + * needed, or if the credit has changed while state is BP_EAGAIN. + */ +static bool balloon_thread_cond(enum bp_state state, long credit) +{ + if (state != BP_EAGAIN) + credit = 0; + + return current_credit() != credit || kthread_should_stop(); +} + +/* + * As this is a kthread it is guaranteed to run as a single instance only. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ -static void balloon_process(struct work_struct *work) +static int balloon_thread(void *unused) { enum bp_state state = BP_DONE; long credit; + unsigned long timeout; + + set_freezable(); + for (;;) { + if (state == BP_EAGAIN) + timeout = balloon_stats.schedule_delay * HZ; + else + timeout = 3600 * HZ; + credit = current_credit(); + wait_event_interruptible_timeout(balloon_thread_wq, + balloon_thread_cond(state, credit), timeout); + + if (kthread_should_stop()) + return 0; - do { mutex_lock(&balloon_mutex); credit = current_credit(); @@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work) mutex_unlock(&balloon_mutex); cond_resched(); - - } while (credit && state == BP_DONE); - - /* Schedule more work if there is some still to be done. */ - if (state == BP_EAGAIN) - schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); + } } /* Resets the Xen limit, sets new target, and kicks off processing. */ @@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. 
*/ balloon_stats.target_pages = target; - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); } EXPORT_SYMBOL_GPL(balloon_set_new_target); @@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) /* The balloon may be too large now. Shrink it if needed. */ if (current_credit()) - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); mutex_unlock(&balloon_mutex); } @@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn, static int __init balloon_init(void) { + struct task_struct *task; + if (!xen_domain()) return -ENODEV; @@ -722,6 +744,12 @@ static int __init balloon_init(void) } #endif + task = kthread_run(balloon_thread, NULL, "xen-balloon"); + if (IS_ERR(task)) { + pr_err("xen-balloon thread could not be started, ballooning will not work!\n"); + return PTR_ERR(task); + } + /* Init the xen-balloon driver. */ xen_balloon_init(); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 643fe440c46e5..8c10edf9efe6a 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) { - int i, rc; - int dma_bits; + int rc; + unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT); + unsigned int i, dma_bits = order + PAGE_SHIFT; dma_addr_t dma_handle; phys_addr_t p = virt_to_phys(buf); - dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; + BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1)); + BUG_ON(nslabs % IO_TLB_SEGSIZE); i = 0; do { - int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); - do { rc = xen_create_contiguous_region( - p + (i << IO_TLB_SHIFT), - get_order(slabs << IO_TLB_SHIFT), + p + (i << IO_TLB_SHIFT), order, dma_bits, &dma_handle); } while (rc && dma_bits++ < MAX_DMA_BITS); if (rc) return rc; - i += slabs; + i += IO_TLB_SEGSIZE; } while (i < nslabs); return 0; } @@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err) return ""; } -#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE) - -int __ref xen_swiotlb_init(void) +int xen_swiotlb_init(void) { enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; unsigned long bytes = swiotlb_size_or_default(); @@ -185,7 +182,7 @@ int __ref xen_swiotlb_init(void) order--; } if (!start) - goto error; + goto exit; if (order != get_order(bytes)) { pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", (PAGE_SIZE << order) >> 20); @@ -208,15 +205,15 @@ int __ref xen_swiotlb_init(void) swiotlb_set_max_segment(PAGE_SIZE); return 0; error: - if (repeat--) { + if (nslabs > 1024 && repeat--) { /* Min is 2MB */ - nslabs = max(1024UL, (nslabs >> 1)); - pr_info("Lowering to %luMB\n", - (nslabs << IO_TLB_SHIFT) >> 20); + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); + bytes = nslabs << IO_TLB_SHIFT; + pr_info("Lowering to %luMB\n", bytes >> 20); goto retry; } +exit: pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); - free_pages((unsigned long)start, order); return rc; } @@ -244,9 +241,9 @@ void __init xen_swiotlb_init_early(void) rc = xen_swiotlb_fixup(start, nslabs); if (rc) { memblock_free(__pa(start), PAGE_ALIGN(bytes)); - if (repeat--) { + if (nslabs > 1024 && repeat--) { /* Min is 2MB */ - nslabs = max(1024UL, (nslabs >> 1)); + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); bytes = nslabs << IO_TLB_SHIFT; pr_info("Lowering to %luMB\n", bytes >> 20); goto retry; @@ 
-254,7 +251,7 @@ void __init xen_swiotlb_init_early(void) panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); } - if (swiotlb_init_with_tbl(start, nslabs, false)) + if (swiotlb_init_with_tbl(start, nslabs, true)) panic("Cannot allocate SWIOTLB buffer"); swiotlb_set_max_segment(PAGE_SIZE); } diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 7d9b23d981bf1..1b4d5809808d0 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -20,6 +20,37 @@ #include #include "internal.h" +/* + * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring + * to the pages in this file's pagecache, forcing the kernel to go through + * ->fault() or ->page_mkwrite() - at which point we can handle invalidation + * more fully. + */ +void afs_invalidate_mmap_work(struct work_struct *work) +{ + struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work); + + unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false); +} + +void afs_server_init_callback_work(struct work_struct *work) +{ + struct afs_server *server = container_of(work, struct afs_server, initcb_work); + struct afs_vnode *vnode; + struct afs_cell *cell = server->cell; + + down_read(&cell->fs_open_mmaps_lock); + + list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) { + if (vnode->cb_server == server) { + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + queue_work(system_unbound_wq, &vnode->cb_work); + } + } + + up_read(&cell->fs_open_mmaps_lock); +} + /* * Allow the fileserver to request callback state (re-)initialisation. * Unfortunately, UUIDs are not guaranteed unique. @@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server) rcu_read_lock(); do { server->cb_s_break++; - server = rcu_dereference(server->uuid_next); - } while (0); + atomic_inc(&server->cell->fs_s_break); + if (!list_empty(&server->cell->fs_open_mmaps)) + queue_work(system_unbound_wq, &server->initcb_work); + + } while ((server = rcu_dereference(server->uuid_next))); rcu_read_unlock(); } @@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { vnode->cb_break++; + vnode->cb_v_break = vnode->volume->cb_v_break; afs_clear_permits(vnode); if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB) afs_lock_may_be_available(vnode); + if (reason != afs_cb_break_for_deleted && + vnode->status.type == AFS_FTYPE_FILE && + atomic_read(&vnode->cb_nr_mmap)) + queue_work(system_unbound_wq, &vnode->cb_work); + trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true); } else { trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false); diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 887b673f62230..d88407fb9bc09 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, seqlock_init(&cell->volume_lock); cell->fs_servers = RB_ROOT; seqlock_init(&cell->fs_lock); + INIT_LIST_HEAD(&cell->fs_open_mmaps); + init_rwsem(&cell->fs_open_mmaps_lock); rwlock_init(&cell->vl_servers_lock); cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index ac829e63c5704..4579bbda46346 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, */ static int afs_d_revalidate_rcu(struct dentry *dentry) { - struct afs_vnode *dvnode, *vnode; + struct afs_vnode *dvnode; struct dentry *parent; - struct 
inode *dir, *inode; + struct inode *dir; long dir_version, de_version; _enter("%p", dentry); @@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry) return -ECHILD; } - /* Check to see if the vnode referred to by the dentry still - * has a callback. - */ - if (d_really_is_positive(dentry)) { - inode = d_inode_rcu(dentry); - if (inode) { - vnode = AFS_FS_I(inode); - if (!afs_check_validity(vnode)) - return -ECHILD; - } - } - return 1; /* Still valid */ } @@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (IS_ERR(key)) key = NULL; - if (d_really_is_positive(dentry)) { - inode = d_inode(dentry); - if (inode) { - vnode = AFS_FS_I(inode); - afs_validate(vnode, key); - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) - goto out_bad; - } - } - - /* lock down the parent dentry so we can peer at it */ + /* Hold the parent dentry so we can peer at it */ parent = dget_parent(dentry); dir = AFS_FS_I(d_inode(parent)); @@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (test_bit(AFS_VNODE_DELETED, &dir->flags)) { _debug("%pd: parent dir deleted", dentry); - goto out_bad_parent; + goto not_found; } /* We only need to invalidate a dentry if the server's copy changed @@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) case 0: /* the filename maps to something */ if (d_really_is_negative(dentry)) - goto out_bad_parent; + goto not_found; inode = d_inode(dentry); if (is_bad_inode(inode)) { printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n", dentry); - goto out_bad_parent; + goto not_found; } vnode = AFS_FS_I(inode); @@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) dentry, fid.unique, vnode->fid.unique, vnode->vfs_inode.i_generation); - write_seqlock(&vnode->cb_lock); - set_bit(AFS_VNODE_DELETED, &vnode->flags); - write_sequnlock(&vnode->cb_lock); goto not_found; } goto out_valid; @@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) default: _debug("failed to iterate dir %pd: %d", parent, ret); - goto out_bad_parent; + goto not_found; } out_valid: @@ -1256,16 +1231,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) _leave(" = 1 [valid]"); return 1; - /* the dirent, if it exists, now points to a different vnode */ not_found: - spin_lock(&dentry->d_lock); - dentry->d_flags |= DCACHE_NFSFS_RENAMED; - spin_unlock(&dentry->d_lock); - -out_bad_parent: _debug("dropping dentry %pd2", dentry); dput(parent); -out_bad: key_put(key); _leave(" = 0 [bad]"); @@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir, goto error; } + ret = afs_validate(vnode, op->key); + if (ret < 0) + goto error_op; + afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, vnode); op->file[0].dv_delta = 1; @@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir, op->create.reason = afs_edit_dir_for_link; return afs_do_sync_operation(op); +error_op: + afs_put_operation(op); error: d_drop(dentry); _leave(" = %d", ret); @@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir, if (IS_ERR(op)) return PTR_ERR(op); + ret = afs_validate(vnode, op->key); + op->error = ret; + if (ret < 0) + goto error; + afs_op_set_vnode(op, 0, orig_dvnode); afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */ op->file[0].dv_delta = 1; diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c index 
f4600c1353adf..540b9fc96824a 100644 --- a/fs/afs/dir_edit.c +++ b/fs/afs/dir_edit.c @@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode, if (b == nr_blocks) { _debug("init %u", b); afs_edit_init_block(meta, block, b); - i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE); + afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE); } /* Only lower dir pages have a counter in the header. */ @@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode, new_directory: afs_edit_init_block(meta, meta, 0); i_size = AFS_DIR_BLOCK_SIZE; - i_size_write(&vnode->vfs_inode, i_size); + afs_set_i_size(vnode, i_size); slot = AFS_DIR_RESV_BLOCKS0; page = page0; block = meta; diff --git a/fs/afs/file.c b/fs/afs/file.c index db035ae2a1345..e6c447ae91f38 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, static int afs_releasepage(struct page *page, gfp_t gfp_flags); static void afs_readahead(struct readahead_control *ractl); +static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); +static void afs_vm_open(struct vm_area_struct *area); +static void afs_vm_close(struct vm_area_struct *area); +static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); const struct file_operations afs_file_operations = { .open = afs_open, .release = afs_release, .llseek = generic_file_llseek, - .read_iter = generic_file_read_iter, + .read_iter = afs_file_read_iter, .write_iter = afs_file_write, .mmap = afs_file_mmap, .splice_read = generic_file_splice_read, @@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = { }; static const struct vm_operations_struct afs_vm_ops = { + .open = afs_vm_open, + .close = afs_vm_close, .fault = filemap_fault, - .map_pages = filemap_map_pages, + .map_pages = afs_vm_map_pages, .page_mkwrite = afs_page_mkwrite, }; @@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq) fsreq->subreq = subreq; fsreq->pos = subreq->start + subreq->transferred; fsreq->len = subreq->len - subreq->transferred; - fsreq->key = subreq->rreq->netfs_priv; + fsreq->key = key_get(subreq->rreq->netfs_priv); fsreq->vnode = vnode; fsreq->iter = &fsreq->def_iter; @@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq) fsreq->pos, fsreq->len); afs_fetch_data(fsreq->vnode, fsreq); + afs_put_read(fsreq); } static int afs_symlink_readpage(struct page *page) @@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) return 1; } +static void afs_add_open_mmap(struct afs_vnode *vnode) +{ + if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) { + down_write(&vnode->volume->cell->fs_open_mmaps_lock); + + list_add_tail(&vnode->cb_mmap_link, + &vnode->volume->cell->fs_open_mmaps); + + up_write(&vnode->volume->cell->fs_open_mmaps_lock); + } +} + +static void afs_drop_open_mmap(struct afs_vnode *vnode) +{ + if (!atomic_dec_and_test(&vnode->cb_nr_mmap)) + return; + + down_write(&vnode->volume->cell->fs_open_mmaps_lock); + + if (atomic_read(&vnode->cb_nr_mmap) == 0) + list_del_init(&vnode->cb_mmap_link); + + up_write(&vnode->volume->cell->fs_open_mmaps_lock); + flush_work(&vnode->cb_work); +} + /* * Handle setting up a memory mapping on an AFS file. 
*/ static int afs_file_mmap(struct file *file, struct vm_area_struct *vma) { + struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); int ret; + afs_add_open_mmap(vnode); + ret = generic_file_mmap(file, vma); if (ret == 0) vma->vm_ops = &afs_vm_ops; + else + afs_drop_open_mmap(vnode); return ret; } + +static void afs_vm_open(struct vm_area_struct *vma) +{ + afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file))); +} + +static void afs_vm_close(struct vm_area_struct *vma) +{ + afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file))); +} + +static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) +{ + struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file)); + struct afs_file *af = vmf->vma->vm_file->private_data; + + switch (afs_validate(vnode, af->key)) { + case 0: + return filemap_map_pages(vmf, start_pgoff, end_pgoff); + case -ENOMEM: + return VM_FAULT_OOM; + case -EINTR: + case -ERESTARTSYS: + return VM_FAULT_RETRY; + case -ESTALE: + default: + return VM_FAULT_SIGBUS; + } +} + +static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); + struct afs_file *af = iocb->ki_filp->private_data; + int ret; + + ret = afs_validate(vnode, af->key); + if (ret < 0) + return ret; + + return generic_file_read_iter(iocb, iter); +} diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index e7e98ad63a91a..c0031a3ab42f5 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -9,6 +9,7 @@ #include #include "afs_fs.h" #include "internal.h" +#include "protocol_afs.h" #include "protocol_yfs.h" static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ; @@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call) struct afs_addr_list *alist = call->alist; struct afs_server *server = call->server; unsigned int index = call->addr_ix; - unsigned int rtt_us = 0; + unsigned int rtt_us = 0, cap0; int ret = call->error; _enter("%pU,%u", &server->uuid, index); @@ -159,6 +160,11 @@ void afs_fileserver_probe_result(struct afs_call *call) clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } + cap0 = ntohl(call->tmp); + if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES) + set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags); + else + clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags); } if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index dd3f45d906d23..4943413d9c5f7 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op) struct afs_read *req = op->fetch.req; __be32 *bp; - if (upper_32_bits(req->pos) || - upper_32_bits(req->len) || - upper_32_bits(req->pos + req->len)) + if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_fetch_data64(op); _enter(""); @@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op) (unsigned long long)op->store.pos, (unsigned long long)op->store.i_size); - if (upper_32_bits(op->store.pos) || - upper_32_bits(op->store.size) || - upper_32_bits(op->store.i_size)) + if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_store_data64(op); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData, @@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op) key_serial(op->key), vp->fid.vid, vp->fid.vnode); ASSERT(attr->ia_valid & ATTR_SIZE); - if (upper_32_bits(attr->ia_size)) + if 
(test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) return afs_fs_setattr_size64(op); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status, @@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) return ret; count = ntohl(call->tmp); - call->count = count; call->count2 = count; - afs_extract_discard(call, count * sizeof(__be32)); + if (count == 0) { + call->unmarshall = 4; + call->tmp = 0; + break; + } + + /* Extract the first word of the capabilities to call->tmp */ + afs_extract_to_tmp(call); call->unmarshall++; fallthrough; - /* Extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) return ret; - /* TODO: Examine capabilities */ + afs_extract_discard(call, (count - 1) * sizeof(__be32)); + call->unmarshall++; + fallthrough; + + /* Extract remaining capabilities words */ + case 3: + ret = afs_extract_data(call, false); + if (ret < 0) + return ret; call->unmarshall++; break; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 80b6c8d967d5c..8fcffea2daf50 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -53,16 +53,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren dump_stack(); } -/* - * Set the file size and block count. Estimate the number of 512 bytes blocks - * used, rounded up to nearest 1K for consistency with other AFS clients. - */ -static void afs_set_i_size(struct afs_vnode *vnode, u64 size) -{ - i_size_write(&vnode->vfs_inode, size); - vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1; -} - /* * Initialise an inode from the vnode status. */ @@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode) } /* - * Get the server reinit counter for a vnode's current server. + * Check to see if we have a server currently serving this volume and that it + * hasn't been reinitialised or dropped from the list. 
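 */

/* A sketch of the break-counter scheme this check relies on (illustrative
 * names; the real fields are cb_s_break, cb_v_break and cb_fs_s_break):
 * each scope keeps a counter that is bumped whenever its callbacks are
 * invalidated en masse, and a vnode records the values it saw when its
 * callback promise was issued. Revalidation then reduces to cheap
 * equality tests. */

#include <linux/types.h>

struct promise {
        unsigned int seen_server_break; /* server counter at issue time */
        unsigned int seen_volume_break; /* volume counter at issue time */
};

static bool promise_still_valid(const struct promise *p,
                                unsigned int server_break,
                                unsigned int volume_break)
{
        return p->seen_server_break == server_break &&
               p->seen_volume_break == volume_break;
}

/* afs_check_server_good() below applies that test to the per-server
 * counter, after confirming under the RCU read lock that the cached
 * server is still in the volume's server list.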
*/ -static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break) +static bool afs_check_server_good(struct afs_vnode *vnode) { - struct afs_server_list *slist = rcu_dereference(vnode->volume->servers); + struct afs_server_list *slist; struct afs_server *server; + bool good; int i; + if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break)) + return true; + + rcu_read_lock(); + + slist = rcu_dereference(vnode->volume->servers); for (i = 0; i < slist->nr_servers; i++) { server = slist->servers[i].server; if (server == vnode->cb_server) { - *_s_break = READ_ONCE(server->cb_s_break); - return true; + good = (vnode->cb_s_break == server->cb_s_break); + rcu_read_unlock(); + return good; } } + rcu_read_unlock(); return false; } @@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break) */ bool afs_check_validity(struct afs_vnode *vnode) { - struct afs_volume *volume = vnode->volume; enum afs_cb_break_reason need_clear = afs_cb_break_no_break; time64_t now = ktime_get_real_seconds(); - bool valid; - unsigned int cb_break, cb_s_break, cb_v_break; + unsigned int cb_break; int seq = 0; do { read_seqbegin_or_lock(&vnode->cb_lock, &seq); - cb_v_break = READ_ONCE(volume->cb_v_break); cb_break = vnode->cb_break; - if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) && - afs_get_s_break_rcu(vnode, &cb_s_break)) { - if (vnode->cb_s_break != cb_s_break || - vnode->cb_v_break != cb_v_break) { - vnode->cb_s_break = cb_s_break; - vnode->cb_v_break = cb_v_break; - need_clear = afs_cb_break_for_vsbreak; - valid = false; - } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { + if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { + if (vnode->cb_v_break != vnode->volume->cb_v_break) + need_clear = afs_cb_break_for_v_break; + else if (!afs_check_server_good(vnode)) + need_clear = afs_cb_break_for_s_reinit; + else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) need_clear = afs_cb_break_for_zap; - valid = false; - } else if (vnode->cb_expires_at - 10 <= now) { + else if (vnode->cb_expires_at - 10 <= now) need_clear = afs_cb_break_for_lapsed; - valid = false; - } else { - valid = true; - } } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { - valid = true; + ; } else { - vnode->cb_v_break = cb_v_break; - valid = false; + need_clear = afs_cb_break_no_promise; } } while (need_seqretry(&vnode->cb_lock, seq)); done_seqretry(&vnode->cb_lock, seq); - if (need_clear != afs_cb_break_no_break) { - write_seqlock(&vnode->cb_lock); - if (cb_break == vnode->cb_break) - __afs_break_callback(vnode, need_clear); - else - trace_afs_cb_miss(&vnode->fid, need_clear); - write_sequnlock(&vnode->cb_lock); - valid = false; - } + if (need_clear == afs_cb_break_no_break) + return true; - return valid; + write_seqlock(&vnode->cb_lock); + if (need_clear == afs_cb_break_no_promise) + vnode->cb_v_break = vnode->volume->cb_v_break; + else if (cb_break == vnode->cb_break) + __afs_break_callback(vnode, need_clear); + else + trace_afs_cb_miss(&vnode->fid, need_clear); + write_sequnlock(&vnode->cb_lock); + return false; } /* @@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode) */ int afs_validate(struct afs_vnode *vnode, struct key *key) { - bool valid; int ret; _enter("{v={%llx:%llu} fl=%lx},%x", vnode->fid.vid, vnode->fid.vnode, vnode->flags, key_serial(key)); - rcu_read_lock(); - valid = afs_check_validity(vnode); - rcu_read_unlock(); - - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) - clear_nlink(&vnode->vfs_inode); + if 
(unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) { + if (vnode->vfs_inode.i_nlink) + clear_nlink(&vnode->vfs_inode); + goto valid; + } - if (valid) + if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) && + afs_check_validity(vnode)) goto valid; down_write(&vnode->validate_lock); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 5ed416f4ff335..0ad97a8fc0d49 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -390,6 +390,9 @@ struct afs_cell { /* Active fileserver interaction state. */ struct rb_root fs_servers; /* afs_server (by server UUID) */ seqlock_t fs_lock; /* For fs_servers */ + struct rw_semaphore fs_open_mmaps_lock; + struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */ + atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */ /* VL server list. */ rwlock_t vl_servers_lock; /* Lock on vl_servers */ @@ -503,6 +506,7 @@ struct afs_server { struct hlist_node addr4_link; /* Link in net->fs_addresses4 */ struct hlist_node addr6_link; /* Link in net->fs_addresses6 */ struct hlist_node proc_link; /* Link in net->fs_proc */ + struct work_struct initcb_work; /* Work for CB.InitCallBackState* */ struct afs_server *gc_next; /* Next server in manager's list */ time64_t unuse_time; /* Time at which last unused */ unsigned long flags; @@ -516,6 +520,7 @@ struct afs_server { #define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */ #define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */ #define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */ +#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */ atomic_t ref; /* Object refcount */ atomic_t active; /* Active user count */ u32 addr_version; /* Address list version */ @@ -657,7 +662,11 @@ struct afs_vnode { afs_lock_type_t lock_type : 8; /* outstanding callback notification on this file */ + struct work_struct cb_work; /* Work for mmap'd files */ + struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */ void *cb_server; /* Server with callback/filelock */ + atomic_t cb_nr_mmap; /* Number of mmaps */ + unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */ unsigned int cb_s_break; /* Mass break counter on ->server */ unsigned int cb_v_break; /* Mass break counter on ->volume */ unsigned int cb_break; /* Break counter on vnode */ @@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def; /* * callback.c */ +extern void afs_invalidate_mmap_work(struct work_struct *); +extern void afs_server_init_callback_work(struct work_struct *work); extern void afs_init_callback_state(struct afs_server *); extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); @@ -1585,6 +1596,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op, (void *)(unsigned long)dir_vp->scb.status.data_version; } +/* + * Set the file size and block count. Estimate the number of 512 bytes blocks + * used, rounded up to nearest 1K for consistency with other AFS clients. + */ +static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size) +{ + i_size_write(&vnode->vfs_inode, size); + vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1; +} + /* * Check for a conflicting operation on a directory that we just unlinked from. 
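 */

/* A worked example of the i_blocks estimate in afs_set_i_size() above:
 * i_blocks counts 512-byte sectors, and the byte size is rounded up to a
 * whole 1 KiB for consistency with other AFS clients, hence
 * ((size + 1023) >> 10) << 1. */

static unsigned long long afs_sectors_for_size(unsigned long long size)
{
        /* 0 -> 0 sectors, 1..1024 -> 2 sectors (1 KiB),
         * 1025..2048 -> 4 sectors (2 KiB), and so on. */
        return ((size + 1023) >> 10) << 1;
}

/*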
* If someone managed to sneak a link or an unlink in on the file we just diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h new file mode 100644 index 0000000000000..0c39358c8b702 --- /dev/null +++ b/fs/afs/protocol_afs.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* AFS protocol bits + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + + +#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */ + +/* AFS3 Fileserver capabilities word 0 */ +#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */ +#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */ +#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */ +#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */ diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h index b5bd03b1d3c7f..e4cd89c44c465 100644 --- a/fs/afs/protocol_yfs.h +++ b/fs/afs/protocol_yfs.h @@ -168,3 +168,9 @@ enum yfs_lock_type { yfs_LockMandatoryWrite = 0x101, yfs_LockMandatoryExtend = 0x102, }; + +/* RXYFS Viced Capability Flags */ +#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */ +#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */ +#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */ +#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */ diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index d83f13c44b921..79e1a5f6701be 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -374,6 +374,7 @@ bool afs_select_fileserver(struct afs_operation *op) if (vnode->cb_server != server) { vnode->cb_server = server; vnode->cb_s_break = server->cb_s_break; + vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break); vnode->cb_v_break = vnode->volume->cb_v_break; clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); } diff --git a/fs/afs/server.c b/fs/afs/server.c index 684a2b02b9ff7..6e5b9a19b234e 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell, server->addr_version = alist->version; server->uuid = *uuid; rwlock_init(&server->fs_lock); + INIT_WORK(&server->initcb_work, afs_server_init_callback_work); init_waitqueue_head(&server->probe_wq); INIT_LIST_HEAD(&server->probe_link); spin_lock_init(&server->probe_lock); @@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server) if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags)) afs_give_up_callbacks(net, server); + flush_work(&server->initcb_work); afs_put_server(net, server, afs_server_trace_destroy); } diff --git a/fs/afs/super.c b/fs/afs/super.c index e38bb1e7a4d22..d110def8aa8eb 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb) vnode->lock_state = AFS_VNODE_LOCK_NONE; init_rwsem(&vnode->rmdir_lock); + INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work); _leave(" = %p", &vnode->vfs_inode); return &vnode->vfs_inode; diff --git a/fs/afs/write.c b/fs/afs/write.c index c0534697268ef..2dfe3b3a53d69 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, write_seqlock(&vnode->cb_lock); i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) - i_size_write(&vnode->vfs_inode, maybe_i_size); + 
afs_set_i_size(vnode, maybe_i_size); write_sequnlock(&vnode->cb_lock); } @@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping, } /* Has the page moved or been split? */ - if (unlikely(page != xas_reload(&xas))) + if (unlikely(page != xas_reload(&xas))) { + put_page(page); break; + } - if (!trylock_page(page)) + if (!trylock_page(page)) { + put_page(page); break; + } if (!PageDirty(page) || PageWriteback(page)) { unlock_page(page); + put_page(page); break; } @@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping, t = afs_page_dirty_to(page, priv); if (f != 0 && !new_content) { unlock_page(page); + put_page(page); break; } @@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping, ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) { struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp)); + struct afs_file *af = iocb->ki_filp->private_data; ssize_t result; size_t count = iov_iter_count(from); @@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) if (!count) return 0; + result = afs_validate(vnode, af->key); + if (result < 0) + return result; + result = generic_file_write_iter(iocb, from); _leave(" = %zd", result); @@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) */ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - struct inode *inode = file_inode(file); - struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); + struct afs_file *af = file->private_data; + int ret; _enter("{%llx:%llu},{n=%pD},%d", vnode->fid.vid, vnode->fid.vnode, file, datasync); + ret = afs_validate(vnode, af->key); + if (ret < 0) + return ret; + return file_write_and_wait_range(file, start, end); } @@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) struct file *file = vmf->vma->vm_file; struct inode *inode = file_inode(file); struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_file *af = file->private_data; unsigned long priv; vm_fault_t ret = VM_FAULT_RETRY; _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); + afs_validate(vnode, af->key); + sb_start_pagefault(inode->i_sb); /* Wait for the page to be written to the cache before we allow it to diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index 8a3b30ec860ca..8be57aaedab60 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cache.c - CIFS filesystem cache index structure definitions + * CIFS filesystem cache index structure definitions * * Copyright (c) 2010 Novell, Inc. 
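 */

/* A sketch of the reference discipline the afs_extend_writeback() hunks
 * above enforce: every early exit from the page walk must drop the
 * reference the lookup took. Illustrative only - the real code iterates
 * with an xarray cursor (xas_reload()) rather than find_get_page(). */

#include <linux/mm.h>
#include <linux/pagemap.h>

static void walk_dirty_range(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        for (;;) {
                page = find_get_page(mapping, index);   /* takes a ref */
                if (!page)
                        break;
                if (!trylock_page(page)) {
                        put_page(page); /* every bail-out drops the ref */
                        break;
                }
                /* ... decide whether the writeback range can grow ... */
                unlock_page(page);
                put_page(page);
                index++;
        }
}

/* (the CIFS header cleanups resume below)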
* Authors(s): Suresh Jayaraman (sjayaraman@suse.de> diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 51a824fc926ad..de2c12bcfa4bc 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * fs/cifs_debug.c * * Copyright (C) International Business Machines Corp., 2000,2005 * diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 4fd788586399c..f97407520ea15 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifs_fs_sb.h * * Copyright (c) International Business Machines Corp., 2002,2004 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h index ef723be358afd..b87cbbe6d2d4b 100644 --- a/fs/cifs/cifs_ioctl.h +++ b/fs/cifs/cifs_ioctl.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifs_ioctl.h * * Structure definitions for io control for cifs/smb3 * diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 8fa26a8530f8e..353bd0dd70260 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS + * SPNEGO upcall management for CIFS * * Copyright (c) 2007 Red Hat, Inc. * Author(s): Jeff Layton (jlayton@redhat.com) diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h index 31387d0ea32ea..e6a0451877d47 100644 --- a/fs/cifs/cifs_spnego.h +++ b/fs/cifs/cifs_spnego.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS + * SPNEGO upcall management for CIFS * * Copyright (c) 2007 Red Hat, Inc. * Author(s): Jeff Layton (jlayton@redhat.com) diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 171ad8b42107e..e7582dd791794 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * fs/cifs/cifs_unicode.c * * Copyright (c) International Business Machines Corp., 2000,2009 * Modified by Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 388eb536cff15..ee3aab3dd4ac6 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cifsacl.c * * Copyright (C) International Business Machines Corp., 2007,2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h index f8292bcf8594f..ccbfc754bd3c7 100644 --- a/fs/cifs/cifsacl.h +++ b/fs/cifs/cifsacl.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifsacl.h * * Copyright (c) International Business Machines Corp., 2007 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 2e6f403440376..d118282071b37 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cifsencrypt.c * * Encryption and hashing operations relating to NTLM, NTLMv2. 
See MS-NLMP * for more detailed information diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8c20bfa187acd..9fa930dfd78d6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cifsfs.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index d25a4099b32e7..b50da1901ebd2 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifsfs.h * * Copyright (c) International Business Machines Corp., 2002, 2007 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index c068f7d8d879d..e916470468ea9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifsglob.h * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) @@ -1400,6 +1399,7 @@ struct cifsInodeInfo { #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ #define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */ +#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */ unsigned long flags; spinlock_t writers_lock; unsigned int writers; /* Number of writers on this inode */ diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index 98e8e5aa06137..d2ff438fd31f8 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifspdu.h * * Copyright (c) International Business Machines Corp., 2002,2009 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f9740c21ca3d6..d0f85b666662d 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/cifsproto.h * * Copyright (c) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) @@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode); extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon); +extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon, + const char *path); + extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx); extern void cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index a8e41c1e80cad..243d17696f061 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/cifssmb.c * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0db344807ef13..7881115cfbee1 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/connect.c * * Copyright (C) International Business Machines Corp., 2002,2011 * Author(s): Steve French (sfrench@us.ibm.com) @@ -1090,7 +1089,7 @@ cifs_demultiplex_thread(void *p) module_put_and_exit(0); } -/** +/* * Returns true if srcaddr isn't specified and rhs isn't specified, or * if srcaddr is specified and matches the IP address of the rhs argument */ @@ -1550,6 +1549,9 @@ static int match_session(struct 
cifs_ses *ses, struct smb3_fs_context *ctx) /** * cifs_setup_ipc - helper to setup the IPC tcon for the session + * @ses: smb session to issue the request on + * @ctx: the superblock configuration context to use for building the + * new tree connection for the IPC (interprocess communication RPC) * * A new IPC connection is made and stored in the session * tcon_ipc. The IPC tcon has the same lifetime as the session. @@ -1605,6 +1607,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) /** * cifs_free_ipc - helper to release the session IPC tcon + * @ses: smb session to unmount the IPC from * * Needs to be called everytime a session is destroyed. * @@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), /** * cifs_get_smb_ses - get a session matching @ctx data from @server + * @server: server to setup the session to + * @ctx: superblock configuration context to use to setup the session * * This function assumes it is being called from cifs_mount() where we * already got a server reference (server refcount +1). See @@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon) /** * cifs_get_tcon - get a tcon matching @ctx data from @ses + * @ses: smb session to issue the request on + * @ctx: the superblock configuration context to use for building the * * - tcon refcount is the number of mount points using the tcon. * - ses refcount is the number of tcon using the session. @@ -3030,7 +3037,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx, return full_path; } -/** +/* * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb * * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 5f8a302ffcb2f..6e8e7cc26ae24 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/dir.c * * vfs operations that deal with dentries * diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index 8c616aaeb7c41..0458d28d71aa6 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/dns_resolve.c * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h index 9fa2807ef79e0..afc0df381246b 100644 --- a/fs/cifs/dns_resolve.h +++ b/fs/cifs/dns_resolve.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS - * Handles host name to IP address resolution + * DNS Resolver upcall management for CIFS DFS + * Handles host name to IP address resolution * * Copyright (c) International Business Machines Corp., 2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/export.c b/fs/cifs/export.c index 747a540db9544..37c28415df1e0 100644 --- a/fs/cifs/export.c +++ b/fs/cifs/export.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/export.c * * Copyright (C) International Business Machines Corp., 2007 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d0216472f1c6c..6796fc73b3045 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/file.c * * vfs operations that deal with files * @@ -883,6 +882,7 @@ int cifs_close(struct inode *inode, struct file *file) dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL); if ((cinode->oplock == 
CIFS_CACHE_RHW_FLG) && cinode->lease_granted && + !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) && dclose) { if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { inode->i_ctime = inode->i_mtime = current_time(inode); @@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock) cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag, tcon->ses->server); cifs_sb = CIFS_FILE_SB(file); + set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags); if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index fab47fa7df745..8eedd20c44abd 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/fscache.c - CIFS filesystem cache interface + * CIFS filesystem cache interface * * Copyright (c) 2010 Novell, Inc. * Author(s): Suresh Jayaraman diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index 82e856b9cf894..9baa1d0f22bde 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/fscache.h - CIFS filesystem cache interface definitions + * CIFS filesystem cache interface definitions * * Copyright (c) 2010 Novell, Inc. * Authors(s): Suresh Jayaraman (sjayaraman@suse.de> diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 50c01cff4c84f..82848412ad852 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/inode.c * * Copyright (C) International Business Machines Corp., 2002,2010 * Author(s): Steve French (sfrench@us.ibm.com) @@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) goto unlink_out; } - cifs_close_deferred_file(CIFS_I(inode)); + cifs_close_deferred_file_under_dentry(tcon, full_path); if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, @@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, goto cifs_rename_exit; } - cifs_close_deferred_file(CIFS_I(d_inode(source_dentry))); + cifs_close_deferred_file_under_dentry(tcon, from_name); if (d_inode(target_dentry) != NULL) - cifs_close_deferred_file(CIFS_I(d_inode(target_dentry))); + cifs_close_deferred_file_under_dentry(tcon, to_name); rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry, to_name); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 42c6a0bac6c82..0359b604bdbc0 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/ioctl.c * * vfs operations that deal with io control * @@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); - caps = le64_to_cpu(tcon->fsUnixInfo.Capability); + /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */ if (get_user(ExtAttrBits, (int __user *)arg)) { rc = -EFAULT; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index f0a6d63bc08cb..852e54ee82c28 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/link.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 9469f1cf0b46a..03da00eb7c04b 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -1,6 +1,5 @@ // 
SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/misc.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) @@ -736,7 +735,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode) if (cancel_delayed_work(&cfile->deferred)) { tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) - continue; + break; tmp_list->cfile = cfile; list_add_tail(&tmp_list->list, &file_head); } @@ -767,7 +766,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon) if (cancel_delayed_work(&cfile->deferred)) { tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); if (tmp_list == NULL) - continue; + break; tmp_list->cfile = cfile; list_add_tail(&tmp_list->list, &file_head); } @@ -781,6 +780,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon) kfree(tmp_list); } } +void +cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) +{ + struct cifsFileInfo *cfile; + struct list_head *tmp; + struct file_list *tmp_list, *tmp_next_list; + struct list_head file_head; + void *page; + const char *full_path; + + INIT_LIST_HEAD(&file_head); + page = alloc_dentry_path(); + spin_lock(&tcon->open_file_lock); + list_for_each(tmp, &tcon->openFileList) { + cfile = list_entry(tmp, struct cifsFileInfo, tlist); + full_path = build_path_from_dentry(cfile->dentry, page); + if (strstr(full_path, path)) { + if (delayed_work_pending(&cfile->deferred)) { + if (cancel_delayed_work(&cfile->deferred)) { + tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); + if (tmp_list == NULL) + break; + tmp_list->cfile = cfile; + list_add_tail(&tmp_list->list, &file_head); + } + } + } + } + spin_unlock(&tcon->open_file_lock); + + list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) { + _cifsFileInfo_put(tmp_list->cfile, true, false); + list_del(&tmp_list->list); + kfree(tmp_list); + } + free_dentry_path(page); +} /* parses DFS refferal V3 structure * caller is responsible for freeing target_nodes diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 0e728aac67e9f..fa9fbd6a819cb 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * fs/cifs/netmisc.c * * Copyright (c) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h index 378133ce8869c..25a2b8ef88b99 100644 --- a/fs/cifs/ntlmssp.h +++ b/fs/cifs/ntlmssp.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/ntlmssp.h * * Copyright (c) International Business Machines Corp., 2002,2007 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 54d77c99e21c0..1929e80c09ee1 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/readdir.c * * Directory search handling * diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h index 137f7c95afd66..ae1d025da294a 100644 --- a/fs/cifs/rfc1002pdu.h +++ b/fs/cifs/rfc1002pdu.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/rfc1002pdu.h * * Protocol Data Unit definitions for RFC 1001/1002 support * diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 118403fbeda24..23e02db7923f6 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/sess.c * * SMB/CIFS session setup handling routines * diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 
c9d8a50062b81..f5dcc4940b6da 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/smb2file.c * * Copyright (C) International Business Machines Corp., 2002, 2011 * Author(s): Steve French (sfrench@us.ibm.com), diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index d0e9f3782bd9c..ca692b2283cd3 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/smb2glob.h * * Definitions for various global variables and structures * diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 957b2594f02ed..8297703492eea 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/smb2inode.c * * Copyright (C) International Business Machines Corp., 2002, 2011 * Etersoft, 2012 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 668f771088318..29b5554f6263f 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/smb2misc.c * * Copyright (C) International Business Machines Corp., 2002,2011 * Etersoft, 2012 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index b6d2e35919278..672ae78e866a3 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/smb2pdu.c * * Copyright (C) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index e9cac7970b66b..f32c99c9ba131 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/smb2pdu.h * * Copyright (c) International Business Machines Corp., 2009, 2013 * Etersoft, 2012 diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 263767f644f82..547945443fa7d 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/smb2proto.h * * Copyright (c) International Business Machines Corp., 2002, 2011 * Etersoft, 2012 diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h index 0215ef36e2401..a9e958166fc53 100644 --- a/fs/cifs/smb2status.h +++ b/fs/cifs/smb2status.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/smb2status.h * * SMB2 Status code (network error) definitions * Definitions are from MS-ERREF diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 6f7952ea49410..f59b956f9d250 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/smb2transport.c * * Copyright (C) International Business Machines Corp., 2002, 2011 * Etersoft, 2012 diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h index 60189efb32362..aeffdad829e2e 100644 --- a/fs/cifs/smberr.h +++ b/fs/cifs/smberr.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1 */ /* - * fs/cifs/smberr.h * * Copyright (c) International Business Machines Corp., 2002,2004 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 75a95de320cfe..b7379329b741c 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/transport.c * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c index 59b6c577aa0a1..2f075b5b50df0 100644 --- 
a/fs/cifs/winucase.c +++ b/fs/cifs/winucase.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * fs/cifs/winucase.c * * Copyright (c) Jeffrey Layton , 2013 * diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 9ed481e79ce0c..7d8b72d67c803 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: LGPL-2.1 /* - * fs/cifs/xattr.c * * Copyright (c) International Business Machines Corp., 2003, 2007 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/fs/io-wq.c b/fs/io-wq.c index 6c55362c1f99a..c2e0e8e80949a 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "io-wq.h" @@ -176,7 +177,6 @@ static void io_worker_ref_put(struct io_wq *wq) static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; - struct io_wqe_acct *acct = io_wqe_get_acct(worker); if (refcount_dec_and_test(&worker->ref)) complete(&worker->ref_done); @@ -186,7 +186,6 @@ static void io_worker_exit(struct io_worker *worker) if (worker->flags & IO_WORKER_F_FREE) hlist_nulls_del_rcu(&worker->nulls_node); list_del_rcu(&worker->all_list); - acct->nr_workers--; preempt_disable(); io_wqe_dec_running(worker); worker->flags = 0; @@ -246,8 +245,6 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe, */ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) { - bool do_create = false; - /* * Most likely an attempt to queue unbounded work on an io_wq that * wasn't setup with any unbounded workers. @@ -256,18 +253,15 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct) pr_warn_once("io-wq is not configured for unbound workers"); raw_spin_lock(&wqe->lock); - if (acct->nr_workers < acct->max_workers) { - acct->nr_workers++; - do_create = true; + if (acct->nr_workers == acct->max_workers) { + raw_spin_unlock(&wqe->lock); + return true; } + acct->nr_workers++; raw_spin_unlock(&wqe->lock); - if (do_create) { - atomic_inc(&acct->nr_running); - atomic_inc(&wqe->wq->worker_refs); - return create_io_worker(wqe->wq, wqe, acct->index); - } - - return true; + atomic_inc(&acct->nr_running); + atomic_inc(&wqe->wq->worker_refs); + return create_io_worker(wqe->wq, wqe, acct->index); } static void io_wqe_inc_running(struct io_worker *worker) @@ -574,6 +568,7 @@ static int io_wqe_worker(void *data) } /* timed out, exit unless we're the last worker */ if (last_timeout && acct->nr_workers > 1) { + acct->nr_workers--; raw_spin_unlock(&wqe->lock); __set_current_state(TASK_RUNNING); break; @@ -1287,6 +1282,10 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count) { int i, node, prev = 0; + BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND); + BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2); + for (i = 0; i < 2; i++) { if (new_count[i] > task_rlimit(current, RLIMIT_NPROC)) new_count[i] = task_rlimit(current, RLIMIT_NPROC); diff --git a/fs/io_uring.c b/fs/io_uring.c index 16fb7436043c2..e372d5b9f6dc0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -712,6 +712,7 @@ struct io_async_rw { struct iovec fast_iov[UIO_FASTIOV]; const struct iovec *free_iovec; struct iov_iter iter; + struct iov_iter_state iter_state; size_t bytes_done; struct wait_page_queue wpq; }; @@ -735,7 +736,6 @@ enum { REQ_F_BUFFER_SELECTED_BIT, REQ_F_COMPLETE_INLINE_BIT, REQ_F_REISSUE_BIT, - REQ_F_DONT_REISSUE_BIT, REQ_F_CREDS_BIT, REQ_F_REFCOUNT_BIT, REQ_F_ARM_LTIMEOUT_BIT, @@ -782,8 +782,6 @@ enum { REQ_F_COMPLETE_INLINE = 
BIT(REQ_F_COMPLETE_INLINE_BIT), /* caller should reissue async */ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), - /* don't attempt request reissue, see io_rw_reissue() */ - REQ_F_DONT_REISSUE = BIT(REQ_F_DONT_REISSUE_BIT), /* supports async reads */ REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), /* supports async writes */ @@ -2444,13 +2442,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, req = list_first_entry(done, struct io_kiocb, inflight_entry); list_del(&req->inflight_entry); - if (READ_ONCE(req->result) == -EAGAIN && - !(req->flags & REQ_F_DONT_REISSUE)) { - req->iopoll_completed = 0; - io_req_task_queue_reissue(req); - continue; - } - __io_cqring_fill_event(ctx, req->user_data, req->result, io_put_rw_kbuf(req)); (*nr_events)++; @@ -2613,8 +2604,7 @@ static bool io_resubmit_prep(struct io_kiocb *req) if (!rw) return !io_req_prep_async(req); - /* may have left rw->iter inconsistent on -EIOCBQUEUED */ - iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter)); + iov_iter_restore(&rw->iter, &rw->iter_state); return true; } @@ -2714,10 +2704,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); if (unlikely(res != req->result)) { - if (!(res == -EAGAIN && io_rw_should_reissue(req) && - io_resubmit_prep(req))) { - req_set_fail(req); - req->flags |= REQ_F_DONT_REISSUE; + if (res == -EAGAIN && io_rw_should_reissue(req)) { + req->flags |= REQ_F_REISSUE; + return; + } } } @@ -2843,7 +2832,8 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw) return __io_file_supports_nowait(req->file, rw); } -static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int rw) { struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw.kiocb; @@ -2865,8 +2855,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(ret)) return ret; - /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */ - if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK)) + /* + * If the file is marked O_NONBLOCK, still allow retry for it if it + * supports async. Otherwise it's impossible to use O_NONBLOCK files + * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
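 */

/* The retry decision above as a standalone predicate (hypothetical
 * helper; io_prep_rw() open-codes it). */

#include <linux/fcntl.h>
#include <linux/fs.h>

static bool io_force_nowait(unsigned int ki_flags, unsigned int f_flags,
                            bool file_supports_nowait)
{
        if (ki_flags & IOCB_NOWAIT)
                return true;    /* caller explicitly asked for nowait */
        /* O_NONBLOCK alone no longer forces nowait: only files that
         * cannot be issued async keep the old behaviour. */
        return (f_flags & O_NONBLOCK) && !file_supports_nowait;
}

/* The kernel's open-coded form of the same check: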
+ */ + if ((kiocb->ki_flags & IOCB_NOWAIT) || + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) req->flags |= REQ_F_NOWAIT; ioprio = READ_ONCE(sqe->ioprio); @@ -2931,7 +2926,6 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); struct io_async_rw *io = req->async_data; - bool check_reissue = kiocb->ki_complete == io_complete_rw; /* add previously done IO, if any */ if (io && io->bytes_done > 0) { @@ -2943,19 +2937,27 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, if (req->flags & REQ_F_CUR_POS) req->file->f_pos = kiocb->ki_pos; - if (ret >= 0 && check_reissue) + if (ret >= 0 && (kiocb->ki_complete == io_complete_rw)) __io_complete_rw(req, ret, 0, issue_flags); else io_rw_done(kiocb, ret); - if (check_reissue && (req->flags & REQ_F_REISSUE)) { + if (req->flags & REQ_F_REISSUE) { req->flags &= ~REQ_F_REISSUE; if (io_resubmit_prep(req)) { io_req_task_queue_reissue(req); } else { + unsigned int cflags = io_put_rw_kbuf(req); + struct io_ring_ctx *ctx = req->ctx; + req_set_fail(req); - __io_req_complete(req, issue_flags, ret, - io_put_rw_kbuf(req)); + if (issue_flags & IO_URING_F_NONBLOCK) { + mutex_lock(&ctx->uring_lock); + __io_req_complete(req, issue_flags, ret, cflags); + mutex_unlock(&ctx->uring_lock); + } else { + __io_req_complete(req, issue_flags, ret, cflags); + } } } } @@ -3263,12 +3265,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) ret = nr; break; } + if (!iov_iter_is_bvec(iter)) { + iov_iter_advance(iter, nr); + } else { + req->rw.len -= nr; + req->rw.addr += nr; + } ret += nr; if (nr != iovec.iov_len) break; - req->rw.len -= nr; - req->rw.addr += nr; - iov_iter_advance(iter, nr); } return ret; @@ -3315,12 +3320,17 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; if (!req->async_data) { + struct io_async_rw *iorw; + if (io_alloc_async_data(req)) { kfree(iovec); return -ENOMEM; } io_req_map_rw(req, iovec, fast_iov, iter); + iorw = req->async_data; + /* we've copied and mapped the iter, ensure state is saved */ + iov_iter_save_state(&iorw->iter, &iorw->iter_state); } return 0; } @@ -3339,6 +3349,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; + iov_iter_save_state(&iorw->iter, &iorw->iter_state); return 0; } @@ -3346,7 +3357,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - return io_prep_rw(req, sqe); + return io_prep_rw(req, sqe, READ); } /* @@ -3442,19 +3453,28 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; struct io_async_rw *rw = req->async_data; - ssize_t io_size, ret, ret2; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; if (rw) { iter = &rw->iter; + state = &rw->iter_state; + /* + * We come here from an earlier attempt, restore our state to + * match in case it doesn't. It's cheap enough that we don't + * need to make this conditional. 
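 */

/* A sketch of the save/restore idiom these hunks introduce.
 * iov_iter_save_state(), iov_iter_restore() and iov_iter_advance() are
 * the real <linux/uio.h> API; the helper and its retry policy are
 * illustrative. Invariant: "state" always snapshots the iterator as it
 * was before the most recent attempt, so the iterator is never trusted
 * after a failed or partial issue. */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t issue_with_retry(struct kiocb *kiocb, struct iov_iter *iter,
                                ssize_t (*issue)(struct kiocb *,
                                                 struct iov_iter *))
{
        struct iov_iter_state state;
        ssize_t done = 0, ret;

        iov_iter_save_state(iter, &state);
        for (;;) {
                ret = issue(kiocb, iter);
                if (ret == -EAGAIN) {
                        /* Rewind to the pre-attempt position before
                         * handing the request back for async retry. */
                        iov_iter_restore(iter, &state);
                        return done ? done : -EAGAIN;
                }
                if (ret <= 0)
                        return done ? done : ret;
                /* Partial progress: rewind, consume exactly the bytes
                 * known to have completed, then snapshot again. */
                iov_iter_restore(iter, &state);
                iov_iter_advance(iter, ret);
                done += ret;
                if (!iov_iter_count(iter))
                        return done;
                iov_iter_save_state(iter, &state);
        }
}

/* io_read() performs the same up-front restore: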
+ */ + iov_iter_restore(iter, state); iovec = NULL; } else { ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + state = &__state; + iov_iter_save_state(iter, state); } - io_size = iov_iter_count(iter); - req->result = io_size; + req->result = iov_iter_count(iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3468,7 +3488,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) return ret ?: -EAGAIN; } - ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size); + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); if (unlikely(ret)) { kfree(iovec); return ret; @@ -3484,30 +3504,49 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) /* no retry on NONBLOCK nor RWF_NOWAIT */ if (req->flags & REQ_F_NOWAIT) goto done; - /* some cases will consume bytes even on error returns */ - iov_iter_reexpand(iter, iter->count + iter->truncated); - iov_iter_revert(iter, io_size - iov_iter_count(iter)); ret = 0; } else if (ret == -EIOCBQUEUED) { goto out_free; - } else if (ret <= 0 || ret == io_size || !force_nonblock || + } else if (ret <= 0 || ret == req->result || !force_nonblock || (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { /* read all, failed, already did sync or don't want to retry */ goto done; } + /* + * Don't depend on the iter state matching what was consumed, or being + * untouched in case of error. Restore it and we'll advance it + * manually if we need to. + */ + iov_iter_restore(iter, state); + ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); if (ret2) return ret2; iovec = NULL; rw = req->async_data; - /* now use our persistent iterator, if we aren't already */ - iter = &rw->iter; + /* + * Now use our persistent iterator and state, if we aren't already. + * We've restored and mapped the iter to match. + */ + if (iter != &rw->iter) { + iter = &rw->iter; + state = &rw->iter_state; + } do { - io_size -= ret; + /* + * We end up here because of a partial read, either from + * above or inside this loop. Advance the iter by the bytes + * that were consumed. + */ + iov_iter_advance(iter, ret); + if (!iov_iter_count(iter)) + break; rw->bytes_done += ret; + iov_iter_save_state(iter, state); + /* if we can retry, do so with the callbacks armed */ if (!io_rw_should_retry(req)) { kiocb->ki_flags &= ~IOCB_WAITQ; @@ -3525,7 +3564,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) return 0; /* we got some bytes, but not all. retry. 
*/ kiocb->ki_flags &= ~IOCB_WAITQ; - } while (ret > 0 && ret < io_size); + iov_iter_restore(iter, state); + } while (ret > 0); done: kiocb_done(kiocb, ret, issue_flags); out_free: @@ -3539,7 +3579,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - return io_prep_rw(req, sqe); + return io_prep_rw(req, sqe, WRITE); } static int io_write(struct io_kiocb *req, unsigned int issue_flags) @@ -3548,19 +3588,24 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; struct io_async_rw *rw = req->async_data; - ssize_t ret, ret2, io_size; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + struct iov_iter_state __state, *state; + ssize_t ret, ret2; if (rw) { iter = &rw->iter; + state = &rw->iter_state; + iov_iter_restore(iter, state); iovec = NULL; } else { ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + state = &__state; + iov_iter_save_state(iter, state); } - io_size = iov_iter_count(iter); - req->result = io_size; + req->result = iov_iter_count(iter); + ret2 = 0; /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3577,7 +3622,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) (req->flags & REQ_F_ISREG)) goto copy_iov; - ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size); + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); if (unlikely(ret)) goto out_free; @@ -3624,9 +3669,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) kiocb_done(kiocb, ret2, issue_flags); } else { copy_iov: - /* some cases will consume bytes even on error returns */ - iov_iter_reexpand(iter, iter->count + iter->truncated); - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + iov_iter_restore(iter, state); + if (ret2 > 0) + iov_iter_advance(iter, ret2); ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); return ret ?: -EAGAIN; } @@ -7515,6 +7560,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, break; } while (1); + if (uts) { + struct timespec64 ts; + + if (get_timespec64(&ts, uts)) + return -EFAULT; + timeout = timespec64_to_jiffies(&ts); + } + if (sig) { #ifdef CONFIG_COMPAT if (in_compat_syscall()) @@ -7528,14 +7581,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return ret; } - if (uts) { - struct timespec64 ts; - - if (get_timespec64(&ts, uts)) - return -EFAULT; - timeout = timespec64_to_jiffies(&ts); - } - init_waitqueue_func_entry(&iowq.wq, io_wake_function); iowq.wq.private = current; INIT_LIST_HEAD(&iowq.wq.entry); @@ -8284,11 +8329,27 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, #endif } +static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, + struct io_rsrc_node *node, void *rsrc) +{ + struct io_rsrc_put *prsrc; + + prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); + if (!prsrc) + return -ENOMEM; + + prsrc->tag = *io_get_tag_slot(data, idx); + prsrc->rsrc = rsrc; + list_add(&prsrc->list, &node->rsrc_list); + return 0; +} + static int io_install_fixed_file(struct io_kiocb *req, struct file *file, unsigned int issue_flags, u32 slot_index) { struct io_ring_ctx *ctx = req->ctx; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_switch = false; struct io_fixed_file *file_slot; int ret = -EBADF; @@ -8304,9 +8365,22 @@ static int 
io_install_fixed_file(struct io_kiocb *req, struct file *file, slot_index = array_index_nospec(slot_index, ctx->nr_user_files); file_slot = io_fixed_file_slot(&ctx->file_table, slot_index); - ret = -EBADF; - if (file_slot->file_ptr) - goto err; + + if (file_slot->file_ptr) { + struct file *old_file; + + ret = io_rsrc_node_switch_start(ctx); + if (ret) + goto err; + + old_file = (struct file *)(file_slot->file_ptr & FFS_MASK); + ret = io_queue_rsrc_removal(ctx->file_data, slot_index, + ctx->rsrc_node, old_file); + if (ret) + goto err; + file_slot->file_ptr = 0; + needs_switch = true; + } *io_get_tag_slot(ctx->file_data, slot_index) = 0; io_fixed_file_set(file_slot, file); @@ -8318,27 +8392,14 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file, ret = 0; err: + if (needs_switch) + io_rsrc_node_switch(ctx, ctx->file_data); io_ring_submit_unlock(ctx, !force_nonblock); if (ret) fput(file); return ret; } -static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, - struct io_rsrc_node *node, void *rsrc) -{ - struct io_rsrc_put *prsrc; - - prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL); - if (!prsrc) - return -ENOMEM; - - prsrc->tag = *io_get_tag_slot(data, idx); - prsrc->rsrc = rsrc; - list_add(&prsrc->list, &node->rsrc_list); - return 0; -} - static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_rsrc_update2 *up, unsigned nr_args) @@ -10560,10 +10621,12 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, * ordering. Fine to drop uring_lock here, we hold * a ref to the ctx. */ + refcount_inc(&sqd->refs); mutex_unlock(&ctx->uring_lock); mutex_lock(&sqd->lock); mutex_lock(&ctx->uring_lock); - tctx = sqd->thread->io_uring; + if (sqd->thread) + tctx = sqd->thread->io_uring; } } else { tctx = current->io_uring; @@ -10577,16 +10640,20 @@ static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (ret) goto err; - if (sqd) + if (sqd) { mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } if (copy_to_user(arg, new_count, sizeof(new_count))) return -EFAULT; return 0; err: - if (sqd) + if (sqd) { mutex_unlock(&sqd->lock); + io_put_sq_data(sqd); + } return ret; } diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c index 0b307ca28a19d..3eac3c01749fa 100644 --- a/fs/ksmbd/misc.c +++ b/fs/ksmbd/misc.c @@ -191,19 +191,77 @@ int get_nlink(struct kstat *st) return nlink; } -void ksmbd_conv_path_to_unix(char *path) +char *ksmbd_conv_path_to_unix(char *path) { + size_t path_len, remain_path_len, out_path_len; + char *out_path, *out_next; + int i, pre_dotdot_cnt = 0, slash_cnt = 0; + bool is_last; + strreplace(path, '\\', '/'); -} + path_len = strlen(path); + remain_path_len = path_len; + if (path_len == 0) + return ERR_PTR(-EINVAL); -void ksmbd_strip_last_slash(char *path) -{ - int len = strlen(path); + out_path = kzalloc(path_len + 2, GFP_KERNEL); + if (!out_path) + return ERR_PTR(-ENOMEM); + out_path_len = 0; + out_next = out_path; + + do { + char *name = path + path_len - remain_path_len; + char *next = strchrnul(name, '/'); + size_t name_len = next - name; + + is_last = !next[0]; + if (name_len == 2 && name[0] == '.' && name[1] == '.') { + pre_dotdot_cnt++; + /* handle the case that path ends with "/.." 
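 */

/* The overall algorithm of ksmbd_conv_path_to_unix() sketched as a plain
 * component stack. Illustrative only - the kernel code above works in a
 * single output buffer and counts runs of ".." instead. "out" must hold
 * at least strlen(in) + 2 bytes, matching the kzalloc(path_len + 2, ...)
 * above. */

#include <linux/errno.h>
#include <linux/string.h>

static int normalise_path(const char *in, char *out)
{
        size_t len = 0;

        while (*in) {
                const char *name = in;
                size_t n;

                while (*in && *in != '/')
                        in++;
                n = in - name;
                if (*in)
                        in++;                   /* skip the separator */
                if (n == 0 || (n == 1 && name[0] == '.'))
                        continue;               /* "//" or "." */
                if (n == 2 && name[0] == '.' && name[1] == '.') {
                        if (!len)
                                return -EINVAL; /* climbs past the root */
                        while (len && out[--len] != '/')
                                ;               /* pop last component */
                        continue;
                }
                out[len++] = '/';
                memcpy(out + len, name, n);
                len += n;
        }
        out[len] = '\0';
        return 0;
}

/* (the kernel's "/.." handling resumes here)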
*/ + if (is_last) + goto follow_dotdot; + } else { + if (pre_dotdot_cnt) { +follow_dotdot: + slash_cnt = 0; + for (i = out_path_len - 1; i >= 0; i--) { + if (out_path[i] == '/' && + ++slash_cnt == pre_dotdot_cnt + 1) + break; + } + + if (i < 0 && + slash_cnt != pre_dotdot_cnt) { + kfree(out_path); + return ERR_PTR(-EINVAL); + } + + out_next = &out_path[i+1]; + *out_next = '\0'; + out_path_len = i + 1; - while (len && path[len - 1] == '/') { - path[len - 1] = '\0'; - len--; - } + } + + if (name_len != 0 && + !(name_len == 1 && name[0] == '.') && + !(name_len == 2 && name[0] == '.' && name[1] == '.')) { + next[0] = '\0'; + sprintf(out_next, "%s/", name); + out_next += name_len + 1; + out_path_len += name_len + 1; + next[0] = '/'; + } + pre_dotdot_cnt = 0; + } + + remain_path_len -= name_len + 1; + } while (!is_last); + + if (out_path_len > 0) + out_path[out_path_len-1] = '\0'; + path[path_len] = '\0'; + return out_path; } void ksmbd_conv_path_to_windows(char *path) diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h index af8717d4d85b3..b7b10139ada21 100644 --- a/fs/ksmbd/misc.h +++ b/fs/ksmbd/misc.h @@ -16,8 +16,7 @@ int ksmbd_validate_filename(char *filename); int parse_stream_name(char *filename, char **stream_name, int *s_type); char *convert_to_nt_pathname(char *filename, char *sharepath); int get_nlink(struct kstat *st); -void ksmbd_conv_path_to_unix(char *path); -void ksmbd_strip_last_slash(char *path); +char *ksmbd_conv_path_to_unix(char *path); void ksmbd_conv_path_to_windows(char *path); char *ksmbd_extract_sharename(char *treename); char *convert_to_unix_name(struct ksmbd_share_config *share, char *name); diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index c86164dc70bb7..6304c9bda479c 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -634,7 +634,7 @@ static char * smb2_get_name(struct ksmbd_share_config *share, const char *src, const int maxlen, struct nls_table *local_nls) { - char *name, *unixname; + char *name, *norm_name, *unixname; name = smb_strndup_from_utf16(src, maxlen, 1, local_nls); if (IS_ERR(name)) { @@ -643,11 +643,15 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src, } /* change it to absolute unix name */ - ksmbd_conv_path_to_unix(name); - ksmbd_strip_last_slash(name); - - unixname = convert_to_unix_name(share, name); + norm_name = ksmbd_conv_path_to_unix(name); + if (IS_ERR(norm_name)) { + kfree(name); + return norm_name; + } kfree(name); + + unixname = convert_to_unix_name(share, norm_name); + kfree(norm_name); if (!unixname) { pr_err("can not convert absolute name\n"); return ERR_PTR(-ENOMEM); @@ -4041,6 +4045,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp, path = &fp->filp->f_path; /* single EA entry is requested with given user.* name */ if (req->InputBufferLength) { + if (le32_to_cpu(req->InputBufferLength) < + sizeof(struct smb2_ea_info_req)) + return -EINVAL; + ea_req = (struct smb2_ea_info_req *)req->Buffer; } else { /* need to send all EAs, if no specific EA is requested*/ diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c index 52b2556e76b1b..3a7fa23ba8508 100644 --- a/fs/ksmbd/transport_rdma.c +++ b/fs/ksmbd/transport_rdma.c @@ -20,7 +20,6 @@ #define SUBMOD_NAME "smb_direct" #include -#include #include #include #include diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index a6ee23aadd283..66645a5a35f30 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -15,13 +15,48 @@ #include #include "qnx4.h" +/* + * A qnx4 directory entry is an inode entry or link info + * depending on the status 
field in the last byte. The + * first byte is where the name starts either way, and a + * zero means it's empty. + * + * Also, due to a bug in gcc, we don't want to use the + * real (differently sized) name arrays in the inode and + * link entries, but always the 'de_name[]' one in the + * fake struct entry. + * + * See + * + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6 + * + * for details, but basically gcc will take the size of the + * 'name' array from one of the used union entries randomly. + * + * This use of 'de_name[]' (48 bytes) avoids the false positive + * warnings that would happen if gcc decides to use 'inode.di_name' + * (16 bytes) even when the pointer and size were to come from + * 'link.dl_name' (48 bytes). + * + * In all cases the actual name pointer itself is the same, it's + * only the gcc internal 'what is the size of this field' logic + * that can get confused. + */ +union qnx4_directory_entry { + struct { + const char de_name[48]; + u8 de_pad[15]; + u8 de_status; + }; + struct qnx4_inode_entry inode; + struct qnx4_link_info link; +}; + static int qnx4_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); unsigned int offset; struct buffer_head *bh; - struct qnx4_inode_entry *de; - struct qnx4_link_info *le; unsigned long blknum; int ix, ino; int size; @@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx) } ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { + union qnx4_directory_entry *de; + offset = ix * QNX4_DIR_ENTRY_SIZE; - de = (struct qnx4_inode_entry *) (bh->b_data + offset); - if (!de->di_fname[0]) + de = (union qnx4_directory_entry *) (bh->b_data + offset); + + if (!de->de_name[0]) continue; - if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) + if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) continue; - if (!(de->di_status & QNX4_FILE_LINK)) - size = QNX4_SHORT_NAME_MAX; - else - size = QNX4_NAME_MAX; - size = strnlen(de->di_fname, size); - QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); - if (!(de->di_status & QNX4_FILE_LINK)) + if (!(de->de_status & QNX4_FILE_LINK)) { + size = sizeof(de->inode.di_fname); ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1; - else { - le = (struct qnx4_link_info*)de; - ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * + } else { + size = sizeof(de->link.dl_fname); + ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) * QNX4_INODES_PER_BLOCK + - le->dl_inode_ndx; + de->link.dl_inode_ndx; } - if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { + size = strnlen(de->de_name, size); + QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->de_name)); + if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) { brelse(bh); return 0; } diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h index d01e8c9d7a311..926f87cd6af02 100644 --- a/fs/smbfs_common/smbfsctl.h +++ b/fs/smbfs_common/smbfsctl.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ /* - * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions + * SMB, CIFS, SMB2 FSCTL definitions * * Copyright (c) International Business Machines Corp., 2002,2013 * Author(s): Steve French (sfrench@us.ibm.com) diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index e93375c710b93..cc7338f9e0d1b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) port
&= IO_SPACE_LIMIT; return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; } -#define __pci_ioport_unmap __pci_ioport_unmap -static inline void __pci_ioport_unmap(void __iomem *p) -{ - uintptr_t start = (uintptr_t) PCI_IOBASE; - uintptr_t addr = (uintptr_t) p; - - if (addr >= start && addr < start + IO_SPACE_LIMIT) - return; - iounmap(p); -} +#define ARCH_HAS_GENERIC_IOPORT_MAP #endif #ifndef ioport_unmap @@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_HAS_IOPORT_MAP */ #ifndef CONFIG_GENERIC_IOMAP -struct pci_dev; -extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); - -#ifndef __pci_ioport_unmap -static inline void __pci_ioport_unmap(void __iomem *p) {} -#endif - #ifndef pci_iounmap -#define pci_iounmap pci_iounmap -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) -{ - __pci_ioport_unmap(p); -} +#define ARCH_WANTS_GENERIC_PCI_IOUNMAP +#endif #endif -#endif /* CONFIG_GENERIC_IOMAP */ #ifndef xlate_dev_mem_ptr #define xlate_dev_mem_ptr xlate_dev_mem_ptr diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 9b3eb6d862001..08237ae8b840d 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size) } #endif -#ifdef CONFIG_PCI -/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ -struct pci_dev; -extern void pci_iounmap(struct pci_dev *dev, void __iomem *); -#elif defined(CONFIG_GENERIC_IOMAP) -struct pci_dev; -static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) -{ } -#endif - #include #endif diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index c1ab6a6e72b53..d3eae6cdbacbb 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -197,10 +197,12 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number) return hv_vp_index[cpu_number]; } -static inline int cpumask_to_vpset(struct hv_vpset *vpset, - const struct cpumask *cpus) +static inline int __cpumask_to_vpset(struct hv_vpset *vpset, + const struct cpumask *cpus, + bool exclude_self) { int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; + int this_cpu = smp_processor_id(); /* valid_bank_mask can represent up to 64 banks */ if (hv_max_vp_index / 64 >= 64) @@ -218,6 +220,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, * Some banks may end up being empty but this is acceptable. 
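* (Each bank is a 64-bit mask of VP indexes, so a sparse cpumask can leave whole banks zero.)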
*/ for_each_cpu(cpu, cpus) { + if (exclude_self && cpu == this_cpu) + continue; vcpu = hv_cpu_number_to_vp_number(cpu); if (vcpu == VP_INVAL) return -1; @@ -232,6 +236,19 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, return nr_bank; } +static inline int cpumask_to_vpset(struct hv_vpset *vpset, + const struct cpumask *cpus) +{ + return __cpumask_to_vpset(vpset, cpus, false); +} + +static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset, + const struct cpumask *cpus) +{ + WARN_ON_ONCE(preemptible()); + return __cpumask_to_vpset(vpset, cpus, true); +} + void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index df636c6d8e6c3..5a2f9bf53384b 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen); +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); /* Create a virtual mapping cookie for a port on a given PCI device. * Do not call this directly, it exists to make it easier for architectures * to override */ @@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, { return NULL; } +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } #endif #endif /* __ASM_GENERIC_PCI_IOMAP_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index e1c705fdfa7c5..db2e147e069fe 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -752,107 +752,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains * per-socket cgroup information except for memcg association. * - * On legacy hierarchies, net_prio and net_cls controllers directly set - * attributes on each sock which can then be tested by the network layer. - * On the default hierarchy, each sock is associated with the cgroup it was - * created in and the networking layer can match the cgroup directly. - * - * To avoid carrying all three cgroup related fields separately in sock, - * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. - * On boot, sock_cgroup_data records the cgroup that the sock was created - * in so that cgroup2 matches can be made; however, once either net_prio or - * net_cls starts being used, the area is overridden to carry prioidx and/or - * classid. The two modes are distinguished by whether the lowest bit is - * set. Clear bit indicates cgroup pointer while set bit prioidx and - * classid. - * - * While userland may start using net_prio or net_cls at any time, once - * either is used, cgroup2 matching no longer works. There is no reason to - * mix the two and this is in line with how legacy and v2 compatibility is - * handled. On mode switch, cgroup references which are already being - * pointed to by socks may be leaked. While this can be remedied by adding - * synchronization around sock_cgroup_data, given that the number of leaked - * cgroups is bound and highly unlikely to be high, this seems to be the - * better trade-off. + * On legacy hierarchies, net_prio and net_cls controllers directly + * set attributes on each sock which can then be tested by the network + * layer. 
On the default hierarchy, each sock is associated with the + * cgroup it was created in and the networking layer can match the + * cgroup directly. */ struct sock_cgroup_data { - union { -#ifdef __LITTLE_ENDIAN - struct { - u8 is_data : 1; - u8 no_refcnt : 1; - u8 unused : 6; - u8 padding; - u16 prioidx; - u32 classid; - } __packed; -#else - struct { - u32 classid; - u16 prioidx; - u8 padding; - u8 unused : 6; - u8 no_refcnt : 1; - u8 is_data : 1; - } __packed; + struct cgroup *cgroup; /* v2 */ +#ifdef CONFIG_CGROUP_NET_CLASSID + u32 classid; /* v1 */ +#endif +#ifdef CONFIG_CGROUP_NET_PRIO + u16 prioidx; /* v1 */ #endif - u64 val; - }; }; -/* - * There's a theoretical window where the following accessors race with - * updaters and return part of the previous pointer as the prioidx or - * classid. Such races are short-lived and the result isn't critical. - */ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) { - /* fallback to 1 which is always the ID of the root cgroup */ - return (skcd->is_data & 1) ? skcd->prioidx : 1; +#ifdef CONFIG_CGROUP_NET_PRIO + return READ_ONCE(skcd->prioidx); +#else + return 1; +#endif } static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) { - /* fallback to 0 which is the unconfigured default classid */ - return (skcd->is_data & 1) ? skcd->classid : 0; +#ifdef CONFIG_CGROUP_NET_CLASSID + return READ_ONCE(skcd->classid); +#else + return 0; +#endif } -/* - * If invoked concurrently, the updaters may clobber each other. The - * caller is responsible for synchronization. - */ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, u16 prioidx) { - struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; - - if (sock_cgroup_prioidx(&skcd_buf) == prioidx) - return; - - if (!(skcd_buf.is_data & 1)) { - skcd_buf.val = 0; - skcd_buf.is_data = 1; - } - - skcd_buf.prioidx = prioidx; - WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +#ifdef CONFIG_CGROUP_NET_PRIO + WRITE_ONCE(skcd->prioidx, prioidx); +#endif } static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, u32 classid) { - struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; - - if (sock_cgroup_classid(&skcd_buf) == classid) - return; - - if (!(skcd_buf.is_data & 1)) { - skcd_buf.val = 0; - skcd_buf.is_data = 1; - } - - skcd_buf.classid = classid; - WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +#ifdef CONFIG_CGROUP_NET_CLASSID + WRITE_ONCE(skcd->classid, classid); +#endif } #else /* CONFIG_SOCK_CGROUP_DATA */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 7bf60454a3136..75c151413fda8 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -829,33 +829,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task, */ #ifdef CONFIG_SOCK_CGROUP_DATA -#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) -extern spinlock_t cgroup_sk_update_lock; -#endif - -void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) { -#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) - unsigned long v; - - /* - * @skcd->val is 64bit but the following is safe on 32bit too as we - * just need the lower ulong to be written and read atomically. 
- */ - v = READ_ONCE(skcd->val); - - if (v & 3) - return &cgrp_dfl_root.cgrp; - - return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; -#else - return (struct cgroup *)(unsigned long)skcd->val; -#endif + return skcd->cgroup; } #else /* CONFIG_CGROUP_DATA */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index b67261a1e3e9c..3d5af56337bdb 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -188,6 +188,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, (typeof(ptr)) (__ptr + (off)); }) #endif +#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0) + #ifndef OPTIMIZER_HIDE_VAR /* Make the optimizer believe the variable can be manipulated arbitrarily. */ #define OPTIMIZER_HIDE_VAR(var) \ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index b066024c62e3f..34de69b3b8bad 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -118,6 +118,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); +void memblock_free_ptr(void *ptr, size_t size); void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index b179f1e3541a2..96e113e23d04e 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -144,15 +144,6 @@ static inline void mmap_read_unlock(struct mm_struct *mm) up_read(&mm->mmap_lock); } -static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) -{ - if (mmap_read_trylock(mm)) { - rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); - return true; - } - return false; -} - static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { __mmap_lock_trace_released(mm, false); diff --git a/include/linux/sched.h b/include/linux/sched.h index e12b524426b02..39039ce8ac4c1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1471,6 +1471,7 @@ struct task_struct { mce_whole_page : 1, __mce_reserved : 62; struct callback_head mce_kill_me; + int mce_count; #endif #ifdef CONFIG_KRETPROBES diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6bdb0db3e8258..841e2f0f5240b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1940,7 +1940,7 @@ static inline void __skb_insert(struct sk_buff *newsk, WRITE_ONCE(newsk->prev, prev); WRITE_ONCE(next->prev, newsk); WRITE_ONCE(prev->next, newsk); - list->qlen++; + WRITE_ONCE(list->qlen, list->qlen + 1); } static inline void __skb_queue_splice(const struct sk_buff_head *list, diff --git a/include/linux/uio.h b/include/linux/uio.h index 5265024e8b909..207101a9c5c32 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -27,6 +27,12 @@ enum iter_type { ITER_DISCARD, }; +struct iov_iter_state { + size_t iov_offset; + size_t count; + unsigned long nr_segs; +}; + struct iov_iter { u8 iter_type; bool data_source; @@ -47,7 +53,6 @@ struct iov_iter { }; loff_t xarray_start; }; - size_t truncated; }; static inline enum iter_type iov_iter_type(const struct iov_iter *i) @@ -55,6 +60,14 @@ static inline enum iter_type iov_iter_type(const struct iov_iter *i) return i->iter_type; } +static inline void iov_iter_save_state(struct iov_iter *iter, + struct iov_iter_state *state) +{ + state->iov_offset = iter->iov_offset; + state->count = iter->count; + state->nr_segs = iter->nr_segs; +} + static inline bool iter_is_iovec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_IOVEC; @@ -233,6 +246,7 @@ 
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags); @@ -255,10 +269,8 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) * conversion in assignment is by definition greater than all * values of size_t, including old i->count. */ - if (i->count > count) { - i->truncated += i->count - count; + if (i->count > count) i->count = count; - } } /* @@ -267,7 +279,6 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) */ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { - i->truncated -= count - i->count; i->count = count; } diff --git a/include/net/dsa.h b/include/net/dsa.h index f9a17145255a3..258867eff2309 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -447,6 +447,11 @@ static inline bool dsa_port_is_user(struct dsa_port *dp) return dp->type == DSA_PORT_TYPE_USER; } +static inline bool dsa_port_is_unused(struct dsa_port *dp) +{ + return dp->type == DSA_PORT_TYPE_UNUSED; +} + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 9f73ed2cf0611..bca73e8c8cdec 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -306,11 +306,13 @@ enum afs_flock_operation { enum afs_cb_break_reason { afs_cb_break_no_break, + afs_cb_break_no_promise, afs_cb_break_for_callback, afs_cb_break_for_deleted, afs_cb_break_for_lapsed, + afs_cb_break_for_s_reinit, afs_cb_break_for_unlink, - afs_cb_break_for_vsbreak, + afs_cb_break_for_v_break, afs_cb_break_for_volume_callback, afs_cb_break_for_zap, }; @@ -602,11 +604,13 @@ enum afs_cb_break_reason { #define afs_cb_break_reasons \ EM(afs_cb_break_no_break, "no-break") \ + EM(afs_cb_break_no_promise, "no-promise") \ EM(afs_cb_break_for_callback, "break-cb") \ EM(afs_cb_break_for_deleted, "break-del") \ EM(afs_cb_break_for_lapsed, "break-lapsed") \ + EM(afs_cb_break_for_s_reinit, "s-reinit") \ EM(afs_cb_break_for_unlink, "break-unlink") \ - EM(afs_cb_break_for_vsbreak, "break-vs") \ + EM(afs_cb_break_for_v_break, "break-v") \ EM(afs_cb_break_for_volume_callback, "break-v-cb") \ E_(afs_cb_break_for_zap, "break-zap") diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h index 69829205fdb5e..8e87d27b09516 100644 --- a/include/uapi/linux/cifs/cifs_mount.h +++ b/include/uapi/linux/cifs/cifs_mount.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */ /* - * include/uapi/linux/cifs/cifs_mount.h * * Author(s): Scott Lovenberg (scott.lovenberg@gmail.com) * diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 59ef35154e3d8..b270a07b285ec 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -317,13 +317,19 @@ enum { IORING_REGISTER_IOWQ_AFF = 17, IORING_UNREGISTER_IOWQ_AFF = 18, - /* set/get max number of workers */ + /* set/get max number of io-wq workers */ IORING_REGISTER_IOWQ_MAX_WORKERS = 19, /* this goes last */ IORING_REGISTER_LAST }; +/* io-wq worker categories */ +enum { + IO_WQ_BOUND, + IO_WQ_UNBOUND, +}; + /* deprecated, see struct io_uring_rsrc_update */ struct io_uring_files_update { __u32
offset; diff --git a/init/main.c b/init/main.c index 5c9a48df90e1b..3f72169344412 100644 --- a/init/main.c +++ b/init/main.c @@ -924,7 +924,7 @@ static void __init print_unknown_bootoptions(void) end += sprintf(end, " %s", *p); pr_notice("Unknown command line parameters:%s\n", unknown_options); - memblock_free(__pa(unknown_options), len); + memblock_free_ptr(unknown_options, len); } asmlinkage __visible void __init __no_sanitize_address start_kernel(void) diff --git a/ipc/sem.c b/ipc/sem.c index f833238df1ce2..6693daf4fe112 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -2238,7 +2238,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops, return -EINVAL; if (nsops > SEMOPM_FAST) { - sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL_ACCOUNT); + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (sops == NULL) return -ENOMEM; } diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index ca3cd9aaa6ced..7b4afb7d96db1 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook */ diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h index e546b18d27dae..a4b040793f443 100644 --- a/kernel/bpf/disasm.h +++ b/kernel/bpf/disasm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook */ diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index e8eefdf8cf3ec..09a3fd97d329e 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -179,7 +179,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, * with build_id. */ if (!user || !current || !current->mm || irq_work_busy || - !mmap_read_trylock_non_owner(current->mm)) { + !mmap_read_trylock(current->mm)) { /* cannot access current->mm, fall back to ips */ for (i = 0; i < trace_nr; i++) { id_offs[i].status = BPF_STACK_BUILD_ID_IP; @@ -204,9 +204,15 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } if (!work) { - mmap_read_unlock_non_owner(current->mm); + mmap_read_unlock(current->mm); } else { work->mm = current->mm; + + /* The lock will be released once we're out of interrupt + * context. Tell lockdep that we've released it now so + * it doesn't complain that we forgot to release it. 
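+	  * (The real unlock happens later, from the deferred irq_work + * handler, which calls mmap_read_unlock_non_owner().)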
+ */ + rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_); irq_work_queue(&work->irq_work); } } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 047ac4b4703b0..e76b559179054 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9912,6 +9912,8 @@ static int check_btf_line(struct bpf_verifier_env *env, nr_linfo = attr->line_info_cnt; if (!nr_linfo) return 0; + if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) + return -EINVAL; rec_size = attr->line_info_rec_size; if (rec_size < MIN_BPF_LINEINFO_SIZE || diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 881ce1470beba..8afa8690d288c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -6572,74 +6572,44 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v) */ #ifdef CONFIG_SOCK_CGROUP_DATA -#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) - -DEFINE_SPINLOCK(cgroup_sk_update_lock); -static bool cgroup_sk_alloc_disabled __read_mostly; - -void cgroup_sk_alloc_disable(void) -{ - if (cgroup_sk_alloc_disabled) - return; - pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n"); - cgroup_sk_alloc_disabled = true; -} - -#else - -#define cgroup_sk_alloc_disabled false - -#endif - void cgroup_sk_alloc(struct sock_cgroup_data *skcd) { - if (cgroup_sk_alloc_disabled) { - skcd->no_refcnt = 1; - return; - } - /* Don't associate the sock with unrelated interrupted task's cgroup. */ if (in_interrupt()) return; rcu_read_lock(); - while (true) { struct css_set *cset; cset = task_css_set(current); if (likely(cgroup_tryget(cset->dfl_cgrp))) { - skcd->val = (unsigned long)cset->dfl_cgrp; + skcd->cgroup = cset->dfl_cgrp; cgroup_bpf_get(cset->dfl_cgrp); break; } cpu_relax(); } - rcu_read_unlock(); } void cgroup_sk_clone(struct sock_cgroup_data *skcd) { - if (skcd->val) { - if (skcd->no_refcnt) - return; - /* - * We might be cloning a socket which is left in an empty - * cgroup and the cgroup might have already been rmdir'd. - * Don't use cgroup_get_live(). - */ - cgroup_get(sock_cgroup_ptr(skcd)); - cgroup_bpf_get(sock_cgroup_ptr(skcd)); - } + struct cgroup *cgrp = sock_cgroup_ptr(skcd); + + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live().
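+	 * cgroup_get() only takes a reference and, unlike + * cgroup_get_live(), does not complain if the cgroup is already dying.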
+ */ + cgroup_get(cgrp); + cgroup_bpf_get(cgrp); } void cgroup_sk_free(struct sock_cgroup_data *skcd) { struct cgroup *cgrp = sock_cgroup_ptr(skcd); - if (skcd->no_refcnt) - return; cgroup_bpf_put(cgrp); cgroup_put(cgrp); } diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 6c90c69e5311a..95445bd6eb725 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -567,7 +567,8 @@ static void add_dma_entry(struct dma_debug_entry *entry) pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; } else if (rc == -EEXIST) { - pr_err("cacheline tracking EEXIST, overlapping mappings aren't supported\n"); + err_printk(entry->dev, entry, + "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); } } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 7ee5284bff585..06fec5547e7c7 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -206,7 +206,8 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, /** * dma_map_sg_attrs - Map the given buffer for DMA * @dev: The device for which to perform the DMA operation - * @sg: The sg_table object describing the buffer + * @sg: The sg_table object describing the buffer + * @nents: Number of entries to map * @dir: DMA direction * @attrs: Optional DMA attributes for the map operation * diff --git a/kernel/events/core.c b/kernel/events/core.c index 744e8726c5b2f..0c000cb01eeb1 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10193,7 +10193,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event) return; if (ifh->nr_file_filters) { - mm = get_task_mm(event->ctx->task); + mm = get_task_mm(task); if (!mm) goto restart; diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c index 4ba15088e6405..88191f6e252c3 100644 --- a/kernel/locking/rwbase_rt.c +++ b/kernel/locking/rwbase_rt.c @@ -41,6 +41,12 @@ * The risk of writer starvation is there, but the pathological use cases * which trigger it are not necessarily the typical RT workloads. * + * Fast-path orderings: + * The lock/unlock of readers can run in fast paths: lock and unlock are only + * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE + * semantics of rwbase_rt. Atomic ops should thus provide _acquire() + * and _release() (or stronger). + * * Common code shared between RT rw_semaphore and rwlock */ @@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb) * set. */ for (r = atomic_read(&rwb->readers); r < 0;) { + /* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */ if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1))) return 1; } @@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb, /* * rwb->readers can only hit 0 when a writer is waiting for the * active readers to leave the critical section. + * + * dec_and_test() is fully ordered, provides RELEASE. 
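+	 * Pairs with the atomic_read_acquire() in __rwbase_write_trylock().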
*/ if (unlikely(atomic_dec_and_test(&rwb->readers))) __rwbase_read_unlock(rwb, state); @@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias, { struct rt_mutex_base *rtm = &rwb->rtmutex; - atomic_add(READER_BIAS - bias, &rwb->readers); + /* + * _release() is needed in case that reader is in fast path, pairing + * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE + */ + (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers); raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); rwbase_rtmutex_unlock(rtm); } @@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb) __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags); } +static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb) +{ + /* Can do without CAS because we're serialized by wait_lock. */ + lockdep_assert_held(&rwb->rtmutex.wait_lock); + + /* + * _acquire is needed in case the reader is in the fast path, pairing + * with rwbase_read_unlock(), provides ACQUIRE. + */ + if (!atomic_read_acquire(&rwb->readers)) { + atomic_set(&rwb->readers, WRITER_BIAS); + return 1; + } + + return 0; +} + static int __sched rwbase_write_lock(struct rwbase_rt *rwb, unsigned int state) { @@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb, atomic_sub(READER_BIAS, &rwb->readers); raw_spin_lock_irqsave(&rtm->wait_lock, flags); - /* - * set_current_state() for rw_semaphore - * current_save_and_set_rtlock_wait_state() for rwlock - */ - rwbase_set_and_save_current_state(state); + if (__rwbase_write_trylock(rwb)) + goto out_unlock; - /* Block until all readers have left the critical section. */ - for (; atomic_read(&rwb->readers);) { + rwbase_set_and_save_current_state(state); + for (;;) { /* Optimized out for rwlocks */ if (rwbase_signal_pending_state(state, current)) { - __set_current_state(TASK_RUNNING); + rwbase_restore_current_state(); __rwbase_write_unlock(rwb, 0, flags); return -EINTR; } + + if (__rwbase_write_trylock(rwb)) + break; + raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); + rwbase_schedule(); + raw_spin_lock_irqsave(&rtm->wait_lock, flags); - /* - * Schedule and wait for the readers to leave the critical - * section. The last reader leaving it wakes the waiter. 
- */ - if (atomic_read(&rwb->readers) != 0) - rwbase_schedule(); set_current_state(state); - raw_spin_lock_irqsave(&rtm->wait_lock, flags); } - - atomic_set(&rwb->readers, WRITER_BIAS); rwbase_restore_current_state(); + +out_unlock: raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); return 0; } @@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb) atomic_sub(READER_BIAS, &rwb->readers); raw_spin_lock_irqsave(&rtm->wait_lock, flags); - if (!atomic_read(&rwb->readers)) { - atomic_set(&rwb->readers, WRITER_BIAS); + if (__rwbase_write_trylock(rwb)) { raw_spin_unlock_irqrestore(&rtm->wait_lock, flags); return 1; } diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 825277e1e742d..a8d0a58deebc7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1166,9 +1166,9 @@ void __init setup_log_buf(int early) return; err_free_descs: - memblock_free(__pa(new_descs), new_descs_size); + memblock_free_ptr(new_descs, new_descs_size); err_free_log_buf: - memblock_free(__pa(new_log_buf), new_log_buf_len); + memblock_free_ptr(new_log_buf, new_log_buf_len); } static bool __read_mostly ignore_loglevel; diff --git a/lib/bootconfig.c b/lib/bootconfig.c index f8419cff11471..5ae248b293738 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -792,7 +792,7 @@ void __init xbc_destroy_all(void) xbc_data = NULL; xbc_data_size = 0; xbc_node_num = 0; - memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX); + memblock_free_ptr(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX); xbc_nodes = NULL; brace_index = 0; } diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f2d50d69a6c36..755c10c5138cd 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1972,3 +1972,39 @@ int import_single_range(int rw, void __user *buf, size_t len, return 0; } EXPORT_SYMBOL(import_single_range); + +/** + * iov_iter_restore() - Restore a &struct iov_iter to the same state as when + * iov_iter_save_state() was called. + * + * @i: &struct iov_iter to restore + * @state: state to restore from + * + * Used after iov_iter_save_state() to restore @i, if operations may + * have advanced it. + * + * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC + */ +void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) +{ + if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && + !iov_iter_is_kvec(i))) + return; + i->iov_offset = state->iov_offset; + i->count = state->count; + /* + * For the *vec iters, nr_segs + iov is constant - if we increment + * the vec, then we also decrement the nr_segs count. Hence we don't + * need to track both of these, just one is enough and we can deduct + * the other from that. ITER_KVEC and ITER_IOVEC are the same struct + * size, so we can just increment the iov pointer as they are unionized. + * ITER_BVEC _may_ be the same size on some archs, but on others it is + * not. Be safe and handle it separately.
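+	 * (The rewind below steps bvec/iov back by however many segments + * were consumed since the save, then resets nr_segs.)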
+ */ + BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); + if (iov_iter_is_bvec(i)) + i->bvec -= state->nr_segs - i->nr_segs; + else + i->iov -= state->nr_segs - i->nr_segs; + i->nr_segs = state->nr_segs; +} diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index 2d3eb1cb73b8f..ce39ce9f3526e 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c @@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) return pci_iomap_wc_range(dev, bar, 0, maxlen); } EXPORT_SYMBOL_GPL(pci_iomap_wc); + +/* + * pci_iounmap() somewhat illogically comes from lib/iomap.c for the + * CONFIG_GENERIC_IOMAP case, because that's the code that knows about + * the different IOMAP ranges. + * + * But if the architecture does not use the generic iomap code, and if + * it has _not_ defined its own private pci_iounmap function, we define + * it here. + * + * NOTE! This default implementation assumes that if the architecture + * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will + * be fixed to the range [PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT), + * and does not need unmapping with 'ioport_unmap()'. + * + * If you have different rules for your architecture, you need to + * implement your own pci_iounmap() that knows the rules for where + * and how IO vs MEM get mapped. + * + * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes + * from legacy header file behavior. In particular, + * it would seem to make sense to do the iounmap(p) for the non-IO-space + * case here regardless, but that's not what the old header file code + * did. Probably incorrectly, but this is meant to be bug-for-bug + * compatible. + */ +#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) + +void pci_iounmap(struct pci_dev *dev, void __iomem *p) +{ +#ifdef ARCH_HAS_GENERIC_IOPORT_MAP + uintptr_t start = (uintptr_t) PCI_IOBASE; + uintptr_t addr = (uintptr_t) p; + + if (addr >= start && addr < start + IO_SPACE_LIMIT) + return; + iounmap(p); +#endif +} +EXPORT_SYMBOL(pci_iounmap); + +#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ + #endif /* CONFIG_PCI */ diff --git a/mm/memblock.c b/mm/memblock.c index 0ab5a749bfa69..184dcd2e5d998 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -472,7 +472,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type, kfree(old_array); else if (old_array != memblock_memory_init_regions && old_array != memblock_reserved_init_regions) - memblock_free(__pa(old_array), old_alloc_size); + memblock_free_ptr(old_array, old_alloc_size); /* * Reserve the new array if that comes from the memblock. Otherwise, we @@ -795,6 +795,20 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) return memblock_remove_range(&memblock.memory, base, size); } +/** + * memblock_free_ptr - free boot memory allocation + * @ptr: starting address of the boot memory allocation + * @size: size of the boot memory block in bytes + * + * Free boot memory block previously allocated by memblock_alloc_xx() API. + * The freed memory will not be released to the buddy allocator.
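+ * (It only drops the range from memblock.reserved; releasing memory + * to the page allocator is done separately by memblock_free_all().)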
+ */ +void __init_memblock memblock_free_ptr(void *ptr, size_t size) +{ + if (ptr) + memblock_free(__pa(ptr), size); +} + /** * memblock_free - free boot memory block * @base: phys starting address of the boot memory block diff --git a/mm/memory.c b/mm/memory.c index 25fc46e872142..adf9b9ef8277d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, unmap_mapping_range_tree(&mapping->i_mmap, &details); i_mmap_unlock_write(mapping); } +EXPORT_SYMBOL_GPL(unmap_mapping_pages); /** * unmap_mapping_range - unmap the portion of all mmaps in the specified diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 37b67194c0dfe..414dc5671c45e 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -53,20 +53,6 @@ struct chnl_net { enum caif_states state; }; -static void robust_list_del(struct list_head *delete_node) -{ - struct list_head *list_node; - struct list_head *n; - ASSERT_RTNL(); - list_for_each_safe(list_node, n, &chnl_net_list) { - if (list_node == delete_node) { - list_del(list_node); - return; - } - } - WARN_ON(1); -} - static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) { struct sk_buff *skb; @@ -364,6 +350,7 @@ static int chnl_net_init(struct net_device *dev) ASSERT_RTNL(); priv = netdev_priv(dev); strncpy(priv->name, dev->name, sizeof(priv->name)); + INIT_LIST_HEAD(&priv->list_field); return 0; } @@ -372,7 +359,7 @@ static void chnl_net_uninit(struct net_device *dev) struct chnl_net *priv; ASSERT_RTNL(); priv = netdev_priv(dev); - robust_list_del(&priv->list_field); + list_del_init(&priv->list_field); } static const struct net_device_ops netdev_ops = { @@ -537,7 +524,7 @@ static void __exit chnl_exit_module(void) rtnl_lock(); list_for_each_safe(list_node, _tmp, &chnl_net_list) { dev = list_entry(list_node, struct chnl_net, list_field); - list_del(list_node); + list_del_init(list_node); delete_device(dev); } rtnl_unlock(); diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index b49c57d35a88e..1a6a86693b745 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -71,11 +71,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n) struct update_classid_context *ctx = (void *)v; struct socket *sock = sock_from_file(file); - if (sock) { - spin_lock(&cgroup_sk_update_lock); + if (sock) sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid); - spin_unlock(&cgroup_sk_update_lock); - } if (--ctx->batch == 0) { ctx->batch = UPDATE_CLASSID_BATCH; return n + 1; @@ -121,8 +118,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, struct css_task_iter it; struct task_struct *p; - cgroup_sk_alloc_disable(); - cs->classid = (u32)value; css_task_iter_start(css, 0, &it); diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 99a431c56f230..8456dfbe2eb40 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c @@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of, if (!dev) return -ENODEV; - cgroup_sk_alloc_disable(); - rtnl_lock(); ret = netprio_set_prio(of_css(of), dev, prio); @@ -221,12 +219,10 @@ static ssize_t write_priomap(struct kernfs_open_file *of, static int update_netprio(const void *v, struct file *file, unsigned n) { struct socket *sock = sock_from_file(file); - if (sock) { - spin_lock(&cgroup_sk_update_lock); + + if (sock) sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data, (unsigned long)v); - 
spin_unlock(&cgroup_sk_update_lock); - } return 0; } @@ -235,8 +231,6 @@ static void net_prio_attach(struct cgroup_taskset *tset) struct task_struct *p; struct cgroup_subsys_state *css; - cgroup_sk_alloc_disable(); - cgroup_taskset_for_each(p, css, tset) { void *v = (void *)(unsigned long)css->id; diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index c5c74a34d139d..91e7a22026971 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -94,6 +94,8 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackvec = NULL; newdp->dccps_service_list = NULL; + newdp->dccps_hc_rx_ccid = NULL; + newdp->dccps_hc_tx_ccid = NULL; newdp->dccps_service = dreq->dreq_service; newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1dc45e40f961c..41f36ad8b0ec6 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -345,6 +345,11 @@ bool dsa_schedule_work(struct work_struct *work) return queue_work(dsa_owq, work); } +void dsa_flush_workqueue(void) +{ + flush_workqueue(dsa_owq); +} + int dsa_devlink_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 1b2b25d7bd025..eef13cd20f19f 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -897,6 +897,33 @@ static void dsa_switch_teardown(struct dsa_switch *ds) ds->setup = false; } +/* First tear down the non-shared, then the shared ports. This ensures that + * all work items scheduled by our switchdev handlers for user ports have + * completed before we destroy the refcounting kept on the shared ports. + */ +static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) + dsa_port_teardown(dp); + + dsa_flush_workqueue(); + + list_for_each_entry(dp, &dst->ports, list) + if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) + dsa_port_teardown(dp); +} + +static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst) +{ + struct dsa_port *dp; + + list_for_each_entry(dp, &dst->ports, list) + dsa_switch_teardown(dp->ds); +} + static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) { struct dsa_port *dp; @@ -923,26 +950,13 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) return 0; teardown: - list_for_each_entry(dp, &dst->ports, list) - dsa_port_teardown(dp); + dsa_tree_teardown_ports(dst); - list_for_each_entry(dp, &dst->ports, list) - dsa_switch_teardown(dp->ds); + dsa_tree_teardown_switches(dst); return err; } -static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst) -{ - struct dsa_port *dp; - - list_for_each_entry(dp, &dst->ports, list) - dsa_port_teardown(dp); - - list_for_each_entry(dp, &dst->ports, list) - dsa_switch_teardown(dp->ds); -} - static int dsa_tree_setup_master(struct dsa_switch_tree *dst) { struct dsa_port *dp; @@ -1052,6 +1066,8 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dsa_tree_teardown_master(dst); + dsa_tree_teardown_ports(dst); + dsa_tree_teardown_switches(dst); dsa_tree_teardown_cpu_ports(dst); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 33ab7d7af9eb4..a5c9bc7b66c6e 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -170,6 +170,7 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops); const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf); bool dsa_schedule_work(struct 
work_struct *work); +void dsa_flush_workqueue(void); const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops); static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 662ff531d4e29..a2bf2d8ac65b7 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1854,13 +1854,11 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev) * use the switch internal MDIO bus instead */ ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags); - if (ret) { - netdev_err(slave_dev, - "failed to connect to port %d: %d\n", - dp->index, ret); - phylink_destroy(dp->pl); - return ret; - } + } + if (ret) { + netdev_err(slave_dev, "failed to connect to PHY: %pe\n", + ERR_PTR(ret)); + phylink_destroy(dp->pl); } return ret; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3f7bd7ae7d7af..141e85e6422b1 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1346,7 +1346,7 @@ static u8 tcp_sacktag_one(struct sock *sk, if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) - tp->undo_retrans--; + tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); if ((sacked & TCPCB_SACKED_ACKED) && before(start_seq, state->reord)) state->reord = start_seq; diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index 0d122edc368dd..b91003538d87a 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -935,7 +935,7 @@ static int __init udp_tunnel_nic_init_module(void) { int err; - udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0); + udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0); if (!udp_tunnel_nic_workqueue) return -ENOMEM; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1bec5b22f80d5..0371d2c141455 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1378,7 +1378,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, int err = -ENOMEM; int allow_create = 1; int replace_required = 0; - int sernum = fib6_new_sernum(info->nl_net); if (info->nlh) { if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) @@ -1478,7 +1477,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt, if (!err) { if (rt->nh) list_add(&rt->nh_list, &rt->nh->f6i_list); - __fib6_update_sernum_upto_root(rt, sernum); + __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net)); fib6_start_gc(info->nl_net, rt); } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 53486b162f01c..93271a2632b8e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -869,8 +869,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) } if (tunnel->version == L2TP_HDR_VER_3 && - l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { + l2tp_session_dec_refcount(session); goto invalid; + } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_dec_refcount(session); diff --git a/net/mctp/route.c b/net/mctp/route.c index 5265525011ad2..5ca186d53cb0f 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -1083,8 +1083,10 @@ static void __net_exit mctp_routes_net_exit(struct net *net) { struct mctp_route *rt; + rcu_read_lock(); list_for_each_entry_rcu(rt, &net->mctp.routes, list) mctp_route_release(rt); + rcu_read_unlock(); } static struct pernet_operations mctp_net_ops = { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 543365f58e973..2a2bc64f75cfd 
100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -46,6 +46,8 @@ * Copyright (C) 2011, */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a0a27d87f6312..ad570c2450be8 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2423,7 +2423,7 @@ static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, u32 dport, struct sk_buff_head *xmitq) { - unsigned long time_limit = jiffies + 2; + unsigned long time_limit = jiffies + usecs_to_jiffies(20000); struct sk_buff *skb; unsigned int lim; atomic_t *dcnt; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index eb47b9de23809..92345c9bb60cc 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -3073,7 +3073,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, other = unix_peer(sk); if (other && unix_peer(other) != sk && - unix_recvq_full(other) && + unix_recvq_full_lockless(other) && unix_dgram_peer_wake_me(sk, other)) writable = 0; diff --git a/scripts/Makefile.clang b/scripts/Makefile.clang index 4cce8fd0779c7..51fc23e2e9e50 100644 --- a/scripts/Makefile.clang +++ b/scripts/Makefile.clang @@ -29,7 +29,12 @@ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) else CLANG_FLAGS += -fintegrated-as endif +# By default, clang only warns when it encounters an unknown warning flag or +# certain optimization flags it knows it has not implemented. +# Make it behave more like gcc by erroring when these flags are encountered +# so they can be implemented or wrapped in cc-option. CLANG_FLAGS += -Werror=unknown-warning-option +CLANG_FLAGS += -Werror=ignored-optimization-argument KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) export CLANG_FLAGS diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index eef56d629799a..48585c4d04ade 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -13,7 +13,7 @@ # Stage 2 is handled by this file and does the following # 1) Find all modules listed in modules.order # 2) modpost is then used to -# 3) create one .mod.c file pr. 
module +# 3) create one .mod.c file per module # 4) create one Module.symvers file with CRC for all exported symbols # Step 3 is used to place certain information in the module's ELF diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index b9b0f15e58802..217d21abc86e8 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -34,7 +34,6 @@ REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) -REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$") REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") REGEX_QUOTES = re.compile("(\"(.*?)\")") @@ -102,6 +101,9 @@ def parse_options(): "continue.") if args.commit: + if args.commit.startswith('HEAD'): + sys.exit("The --commit option can't use the HEAD ref") + args.find = False if args.ignore: @@ -432,7 +434,6 @@ def parse_kconfig_file(kfile): lines = [] defined = [] references = [] - skip = False if not os.path.exists(kfile): return defined, references @@ -448,12 +449,6 @@ def parse_kconfig_file(kfile): if REGEX_KCONFIG_DEF.match(line): symbol_def = REGEX_KCONFIG_DEF.findall(line) defined.append(symbol_def[0]) - skip = False - elif REGEX_KCONFIG_HELP.match(line): - skip = True - elif skip: - # ignore content of help messages - pass elif REGEX_KCONFIG_STMT.match(line): line = REGEX_QUOTES.sub("", line) symbols = get_symbols_in_line(line) diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py index 0033eedce003e..1d1bde1fd45eb 100755 --- a/scripts/clang-tools/gen_compile_commands.py +++ b/scripts/clang-tools/gen_compile_commands.py @@ -13,6 +13,7 @@ import os import re import subprocess +import sys _DEFAULT_OUTPUT = 'compile_commands.json' _DEFAULT_LOG_LEVEL = 'WARNING' diff --git a/tools/bootconfig/include/linux/memblock.h b/tools/bootconfig/include/linux/memblock.h index 7862f217d85d3..f2e506f7d57f9 100644 --- a/tools/bootconfig/include/linux/memblock.h +++ b/tools/bootconfig/include/linux/memblock.h @@ -4,9 +4,8 @@ #include -#define __pa(addr) (addr) #define SMP_CACHE_BYTES 0 #define memblock_alloc(size, align) malloc(size) -#define memblock_free(paddr, size) free(paddr) +#define memblock_free_ptr(paddr, size) free(paddr) #endif diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h index 43d9a46d36f07..8816f06fc6c76 100644 --- a/tools/include/linux/compiler-gcc.h +++ b/tools/include/linux/compiler-gcc.h @@ -16,7 +16,9 @@ # define __fallthrough __attribute__ ((fallthrough)) #endif -#define __compiletime_error(message) __attribute__((error(message))) +#if __has_attribute(__error__) +# define __compiletime_error(message) __attribute__((error(message))) +#endif /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index d8886720e83d8..8441e3e1aaac3 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel) free(evsel); } -#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y)) +#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y)) #define MMAP(e, x, y) (e->mmap ? 
((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL) int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) @@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) int cpu, thread; for (cpu = 0; cpu < ncpus; cpu++) { for (thread = 0; thread < nthreads; thread++) { - FD(evsel, cpu, thread) = -1; + int *fd = FD(evsel, cpu, thread); + + if (fd) + *fd = -1; } } } @@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr, static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) { struct perf_evsel *leader = evsel->leader; - int fd; + int *fd; if (evsel == leader) { *group_fd = -1; @@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou return -ENOTCONN; fd = FD(leader, cpu, thread); - if (fd == -1) + if (fd == NULL || *fd == -1) return -EBADF; - *group_fd = fd; + *group_fd = *fd; return 0; } @@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, for (cpu = 0; cpu < cpus->nr; cpu++) { for (thread = 0; thread < threads->nr; thread++) { - int fd, group_fd; + int fd, group_fd, *evsel_fd; + + evsel_fd = FD(evsel, cpu, thread); + if (evsel_fd == NULL) + return -EINVAL; err = get_group_fd(evsel, cpu, thread, &group_fd); if (err < 0) @@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, if (fd < 0) return -errno; - FD(evsel, cpu, thread) = fd; + *evsel_fd = fd; } } @@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu) int thread; for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { - if (FD(evsel, cpu, thread) >= 0) - close(FD(evsel, cpu, thread)); - FD(evsel, cpu, thread) = -1; + int *fd = FD(evsel, cpu, thread); + + if (fd && *fd >= 0) { + close(*fd); + *fd = -1; + } } } @@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel) for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int fd = FD(evsel, cpu, thread); - struct perf_mmap *map = MMAP(evsel, cpu, thread); + int *fd = FD(evsel, cpu, thread); - if (fd < 0) + if (fd == NULL || *fd < 0) continue; - perf_mmap__munmap(map); + perf_mmap__munmap(MMAP(evsel, cpu, thread)); } } @@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int fd = FD(evsel, cpu, thread); - struct perf_mmap *map = MMAP(evsel, cpu, thread); + int *fd = FD(evsel, cpu, thread); + struct perf_mmap *map; - if (fd < 0) + if (fd == NULL || *fd < 0) continue; + map = MMAP(evsel, cpu, thread); perf_mmap__init(map, NULL, false, NULL); - ret = perf_mmap__mmap(map, &mp, fd, cpu); + ret = perf_mmap__mmap(map, &mp, *fd, cpu); if (ret) { perf_evsel__munmap(evsel); return ret; @@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages) void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread) { - if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL) + int *fd = FD(evsel, cpu, thread); + + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL) return NULL; return MMAP(evsel, cpu, thread)->base; @@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count) { size_t size = perf_evsel__read_size(evsel); + int *fd = FD(evsel, cpu, thread); memset(count, 0, sizeof(*count)); - 
if (FD(evsel, cpu, thread) < 0) + if (fd == NULL || *fd < 0) return -EINVAL; if (MMAP(evsel, cpu, thread) && !perf_mmap__read_self(MMAP(evsel, cpu, thread), count)) return 0; - if (readn(FD(evsel, cpu, thread), count->values, size) <= 0) + if (readn(*fd, count->values, size) <= 0) return -errno; return 0; @@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int thread; for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { - int fd = FD(evsel, cpu, thread), - err = ioctl(fd, ioc, arg); + int err; + int *fd = FD(evsel, cpu, thread); + + if (fd == NULL || *fd < 0) + return -1; + + err = ioctl(*fd, ioc, arg); if (err) return err; diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 0e824f7d8b19c..6211d0b84b7a6 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -368,16 +368,6 @@ static inline int output_type(unsigned int type) return OUTPUT_TYPE_OTHER; } -static inline unsigned int attr_type(unsigned int type) -{ - switch (type) { - case OUTPUT_TYPE_SYNTH: - return PERF_TYPE_SYNTH; - default: - return type; - } -} - static bool output_set_by_user(void) { int j; @@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr) output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE; } +static struct evsel *find_first_output_type(struct evlist *evlist, + unsigned int type) +{ + struct evsel *evsel; + + evlist__for_each_entry(evlist, evsel) { + if (output_type(evsel->core.attr.type) == (int)type) + return evsel; + } + return NULL; +} + /* * verify all user requested events exist and the samples * have the expected data @@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session) struct evsel *evsel; for (j = 0; j < OUTPUT_TYPE_MAX; ++j) { - evsel = perf_session__find_first_evtype(session, attr_type(j)); + evsel = find_first_output_type(session->evlist, j); /* * even if fields is set to 0 (ie., show nothing) event must diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 781afe42e90e0..fa5bd5c20e96b 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column, } void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column, - unsigned int row, bool arrow_down) + unsigned int row, int diff, bool arrow_down) { - unsigned int end_row; + int end_row; - if (row >= browser->top_idx) - end_row = row - browser->top_idx; - else + if (diff <= 0) return; SLsmg_set_char_set(1); if (arrow_down) { + if (row + diff <= browser->top_idx) + return; + + end_row = row + diff - browser->top_idx; ui_browser__gotorc(browser, end_row, column - 1); - SLsmg_write_char(SLSMG_ULCORN_CHAR); - ui_browser__gotorc(browser, end_row, column); - SLsmg_draw_hline(2); - ui_browser__gotorc(browser, end_row + 1, column - 1); SLsmg_write_char(SLSMG_LTEE_CHAR); + + while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) { + ui_browser__gotorc(browser, end_row, column - 1); + SLsmg_draw_vline(1); + } + + end_row = (int)(row - browser->top_idx); + if (end_row >= 0) { + ui_browser__gotorc(browser, end_row, column - 1); + SLsmg_write_char(SLSMG_ULCORN_CHAR); + ui_browser__gotorc(browser, end_row, column); + SLsmg_draw_hline(2); + } } else { + if (row < browser->top_idx) + return; + + end_row = row - browser->top_idx; ui_browser__gotorc(browser, end_row, column - 1); SLsmg_write_char(SLSMG_LTEE_CHAR); ui_browser__gotorc(browser, end_row, column); diff --git 
a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h index 3678eb88f119c..510ce45540501 100644 --- a/tools/perf/ui/browser.h +++ b/tools/perf/ui/browser.h @@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph); void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column, u64 start, u64 end); void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column, - unsigned int row, bool arrow_down); + unsigned int row, int diff, bool arrow_down); void __ui_browser__show_title(struct ui_browser *browser, const char *title); void ui_browser__show_title(struct ui_browser *browser, const char *title); int ui_browser__show(struct ui_browser *browser, const char *title, diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index ef4da4295bf7f..e81c2493efdf9 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int ab->selection = al; } -static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor) +static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor) { struct disasm_line *pos = list_prev_entry(cursor, al.node); const char *name; + int diff = 1; + + while (pos && pos->al.offset == -1) { + pos = list_prev_entry(pos, al.node); + if (!ab->opts->hide_src_code) + diff++; + } if (!pos) - return false; + return 0; if (ins__is_lock(&pos->ins)) name = pos->ops.locked.ins.name; @@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor) name = pos->ins.name; if (!name || !cursor->ins.name) - return false; + return 0; - return ins__is_fused(ab->arch, name, cursor->ins.name); + if (ins__is_fused(ab->arch, name, cursor->ins.name)) + return diff; + return 0; } static void annotate_browser__draw_current_jump(struct ui_browser *browser) @@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) struct annotation *notes = symbol__annotation(sym); u8 pcnt_width = annotation__pcnt_width(notes); int width; + int diff = 0; /* PLT symbols contain external offsets */ if (strstr(sym->name, "@plt")) @@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) pcnt_width + 2 + notes->widths.addr + width, from, to); - if (is_fused(ab, cursor)) { + diff = is_fused(ab, cursor); + if (diff > 0) { ui_browser__mark_fused(browser, pcnt_width + 3 + notes->widths.addr + width, - from - 1, - to > from); + from - diff, diff, to > from); } } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index da19be7da284c..44e40bad0e336 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread, al.filtered = 0; al.sym = NULL; + al.srcline = NULL; if (!cpumode) { thread__find_cpumode_addr_location(thread, ip, &al); } else { diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 033051717ba58..f3daa44a82660 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -12,27 +12,36 @@ #include #include - #include "cgroup_helpers.h" /* * To avoid relying on the system setup, when setup_cgroup_env is called - * we create a new mount namespace, and cgroup namespace. 
The cgroup2 - * root is mounted at CGROUP_MOUNT_PATH - * - * Unfortunately, most people don't have cgroupv2 enabled at this point in time. - * It's easier to create our own mount namespace and manage it ourselves. + * we create a new mount namespace, and cgroup namespace. The cgroupv2 + * root is mounted at CGROUP_MOUNT_PATH. Unfortunately, most people don't + * have cgroupv2 enabled at this point in time. It's easier to create our + * own mount namespace and manage it ourselves. We assume /mnt exists. * - * We assume /mnt exists. + * Related cgroupv1 helpers are named *classid*(), since we only use the + * net_cls controller for tagging net_cls.classid. We assume the default + * mount under /sys/fs/cgroup/net_cls, which should be the case for the + * vast majority of users. */ #define WALK_FD_LIMIT 16 + #define CGROUP_MOUNT_PATH "/mnt" +#define CGROUP_MOUNT_DFLT "/sys/fs/cgroup" +#define NETCLS_MOUNT_PATH CGROUP_MOUNT_DFLT "/net_cls" #define CGROUP_WORK_DIR "/cgroup-test-work-dir" + #define format_cgroup_path(buf, path) \ snprintf(buf, sizeof(buf), "%s%s%s", CGROUP_MOUNT_PATH, \ CGROUP_WORK_DIR, path) +#define format_classid_path(buf) \ + snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \ + CGROUP_WORK_DIR) + /** * enable_all_controllers() - Enable all available cgroup v2 controllers * @@ -139,8 +148,7 @@ static int nftwfunc(const char *filename, const struct stat *statptr, return 0; } - -static int join_cgroup_from_top(char *cgroup_path) +static int join_cgroup_from_top(const char *cgroup_path) { char cgroup_procs_path[PATH_MAX + 1]; pid_t pid = getpid(); @@ -313,3 +321,114 @@ int cgroup_setup_and_join(const char *path) { } return cg_fd; } + +/** + * setup_classid_environment() - Setup the cgroupv1 net_cls environment + * + * After calling this function, cleanup_classid_environment should be called + * once testing is complete. + * + * This function will print an error to stderr and return 1 if it is unable + * to setup the cgroup environment. If setup is successful, 0 is returned. + */ +int setup_classid_environment(void) +{ + char cgroup_workdir[PATH_MAX + 1]; + + format_classid_path(cgroup_workdir); + + if (mount("tmpfs", CGROUP_MOUNT_DFLT, "tmpfs", 0, NULL) && + errno != EBUSY) { + log_err("mount cgroup base"); + return 1; + } + + if (mkdir(NETCLS_MOUNT_PATH, 0777) && errno != EEXIST) { + log_err("mkdir cgroup net_cls"); + return 1; + } + + if (mount("net_cls", NETCLS_MOUNT_PATH, "cgroup", 0, "net_cls") && + errno != EBUSY) { + log_err("mount cgroup net_cls"); + return 1; + } + + cleanup_classid_environment(); + + if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) { + log_err("mkdir cgroup work dir"); + return 1; + } + + return 0; +} + +/** + * set_classid() - Set a cgroupv1 net_cls classid + * @id: the numeric classid + * + * Writes the passed classid into the cgroup work dir's net_cls.classid + * file in order to later on trigger socket tagging. + * + * On success, it returns 0, otherwise on failure it returns 1. If there + * is a failure, it prints the error to stderr. 
+ */ +int set_classid(unsigned int id) +{ + char cgroup_workdir[PATH_MAX - 42]; + char cgroup_classid_path[PATH_MAX + 1]; + int fd, rc = 0; + + format_classid_path(cgroup_workdir); + snprintf(cgroup_classid_path, sizeof(cgroup_classid_path), + "%s/net_cls.classid", cgroup_workdir); + + fd = open(cgroup_classid_path, O_WRONLY); + if (fd < 0) { + log_err("Opening cgroup classid: %s", cgroup_classid_path); + return 1; + } + + if (dprintf(fd, "%u\n", id) < 0) { + log_err("Setting cgroup classid"); + rc = 1; + } + + close(fd); + return rc; +} + +/** + * join_classid() - Join a cgroupv1 net_cls classid + * + * This function expects the cgroup work dir to be already created, as we + * join it here. This causes the process sockets to be tagged with the given + * net_cls classid. + * + * On success, it returns 0, otherwise on failure it returns 1. + */ +int join_classid(void) +{ + char cgroup_workdir[PATH_MAX + 1]; + + format_classid_path(cgroup_workdir); + return join_cgroup_from_top(cgroup_workdir); +} + +/** + * cleanup_classid_environment() - Cleanup the cgroupv1 net_cls environment + * + * At call time, it moves the calling process to the root cgroup, and then + * runs the deletion process. + * + * On failure, it will print an error to stderr, and try to continue. + */ +void cleanup_classid_environment(void) +{ + char cgroup_workdir[PATH_MAX + 1]; + + format_classid_path(cgroup_workdir); + join_cgroup_from_top(NETCLS_MOUNT_PATH); + nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT); +} diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index 5fe3d88e4f0d2..629da3854b3e7 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CGROUP_HELPERS_H #define __CGROUP_HELPERS_H + #include #include @@ -8,12 +9,21 @@ #define log_err(MSG, ...) 
fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) - +/* cgroupv2 related */ int cgroup_setup_and_join(const char *path); int create_and_get_cgroup(const char *path); +unsigned long long get_cgroup_id(const char *path); + int join_cgroup(const char *path); + int setup_cgroup_environment(void); void cleanup_cgroup_environment(void); -unsigned long long get_cgroup_id(const char *path); -#endif +/* cgroupv1 related */ +int set_classid(unsigned int id); +int join_classid(void); + +int setup_classid_environment(void); +void cleanup_classid_environment(void); + +#endif /* __CGROUP_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 7e9f6375757a9..6db1af8fdee78 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -208,11 +208,26 @@ int fastopen_connect(int server_fd, const char *data, unsigned int data_len, static int connect_fd_to_addr(int fd, const struct sockaddr_storage *addr, - socklen_t addrlen) + socklen_t addrlen, const bool must_fail) { - if (connect(fd, (const struct sockaddr *)addr, addrlen)) { - log_err("Failed to connect to server"); - return -1; + int ret; + + errno = 0; + ret = connect(fd, (const struct sockaddr *)addr, addrlen); + if (must_fail) { + if (!ret) { + log_err("Unexpected success to connect to server"); + return -1; + } + if (errno != EPERM) { + log_err("Unexpected error from connect to server"); + return -1; + } + } else { + if (ret) { + log_err("Failed to connect to server"); + return -1; + } } return 0; @@ -257,7 +272,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) strlen(opts->cc) + 1)) goto error_close; - if (connect_fd_to_addr(fd, &addr, addrlen)) + if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) goto error_close; return fd; @@ -289,7 +304,7 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) return -1; } - if (connect_fd_to_addr(client_fd, &addr, len)) + if (connect_fd_to_addr(client_fd, &addr, len, false)) return -1; return 0; diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index da7e132657d59..d198181a56487 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -20,6 +20,7 @@ typedef __u16 __sum16; struct network_helper_opts { const char *cc; int timeout_ms; + bool must_fail; }; /* ipv4 test vector */ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c new file mode 100644 index 0000000000000..ab3b9bc5e6d1c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include "connect4_dropper.skel.h" + +#include "cgroup_helpers.h" +#include "network_helpers.h" + +static int run_test(int cgroup_fd, int server_fd, bool classid) +{ + struct network_helper_opts opts = { + .must_fail = true, + }; + struct connect4_dropper *skel; + int fd, err = 0; + + skel = connect4_dropper__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return -1; + + skel->links.connect_v4_dropper = + bpf_program__attach_cgroup(skel->progs.connect_v4_dropper, + cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.connect_v4_dropper, "prog_attach")) { + err = -1; + goto out; + } + + if (classid && !ASSERT_OK(join_classid(), "join_classid")) { + err = -1; + goto out; + } + + fd = 
connect_to_fd_opts(server_fd, &opts); + if (fd < 0) + err = -1; + else + close(fd); +out: + connect4_dropper__destroy(skel); + return err; +} + +void test_cgroup_v1v2(void) +{ + struct network_helper_opts opts = {}; + int server_fd, client_fd, cgroup_fd; + static const int port = 60123; + + /* Step 1: Check base connectivity works without any BPF. */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); + if (!ASSERT_GE(server_fd, 0, "server_fd")) + return; + client_fd = connect_to_fd_opts(server_fd, &opts); + if (!ASSERT_GE(client_fd, 0, "client_fd")) { + close(server_fd); + return; + } + close(client_fd); + close(server_fd); + + /* Step 2: Check BPF policy prog attached to cgroups drops connectivity. */ + cgroup_fd = test__join_cgroup("/connect_dropper"); + if (!ASSERT_GE(cgroup_fd, 0, "cgroup_fd")) + return; + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); + if (!ASSERT_GE(server_fd, 0, "server_fd")) { + close(cgroup_fd); + return; + } + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "cgroup-v2-only"); + setup_classid_environment(); + set_classid(42); + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "cgroup-v1v2"); + cleanup_classid_environment(); + close(server_fd); + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c index 53f0e0fa1a537..37c20b5ffa70f 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c +++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE #include -#include #include "test_task_pt_regs.skel.h" void test_task_pt_regs(void) diff --git a/tools/testing/selftests/bpf/progs/connect4_dropper.c b/tools/testing/selftests/bpf/progs/connect4_dropper.c new file mode 100644 index 0000000000000..b565d997810ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/connect4_dropper.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include + +#include + +#include +#include + +#define VERDICT_REJECT 0 +#define VERDICT_PROCEED 1 + +SEC("cgroup/connect4") +int connect_v4_dropper(struct bpf_sock_addr *ctx) +{ + if (ctx->type != SOCK_STREAM) + return VERDICT_PROCEED; + if (ctx->user_port == bpf_htons(60123)) + return VERDICT_REJECT; + return VERDICT_PROCEED; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c index 6c059f1cfa1bb..e6cb09259408f 100644 --- a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c +++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c @@ -1,12 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include +#include "vmlinux.h" #include #include -struct pt_regs current_regs = {}; -struct pt_regs ctx_regs = {}; +#define PT_REGS_SIZE sizeof(struct pt_regs) + +/* + * The kernel struct pt_regs isn't exported in its entirety to userspace. 
+ * Pass it as an array to task_pt_regs.c + */ +char current_regs[PT_REGS_SIZE] = {}; +char ctx_regs[PT_REGS_SIZE] = {}; int uprobe_res = 0; SEC("uprobe/trigger_func") @@ -17,8 +22,10 @@ int handle_uprobe(struct pt_regs *ctx) current = bpf_get_current_task_btf(); regs = (struct pt_regs *) bpf_task_pt_regs(current); - __builtin_memcpy(¤t_regs, regs, sizeof(*regs)); - __builtin_memcpy(&ctx_regs, ctx, sizeof(*ctx)); + if (bpf_probe_read_kernel(current_regs, PT_REGS_SIZE, regs)) + return 0; + if (bpf_probe_read_kernel(ctx_regs, PT_REGS_SIZE, ctx)) + return 0; /* Prove that uprobe was run */ uprobe_res = 1; diff --git a/tools/testing/selftests/nci/nci_dev.c b/tools/testing/selftests/nci/nci_dev.c index e1bf55dabdf65..162c41e9bcae8 100644 --- a/tools/testing/selftests/nci/nci_dev.c +++ b/tools/testing/selftests/nci/nci_dev.c @@ -746,7 +746,7 @@ int read_write_nci_cmd(int nfc_sock, int virtual_fd, const __u8 *cmd, __u32 cmd_ const __u8 *rsp, __u32 rsp_len) { char buf[256]; - unsigned int len; + int len; send(nfc_sock, &cmd[3], cmd_len - 3, 0); len = read(virtual_fd, buf, cmd_len); diff --git a/tools/testing/selftests/net/altnames.sh b/tools/testing/selftests/net/altnames.sh index 4254ddc3f70b5..1ef9e4159bba8 100755 --- a/tools/testing/selftests/net/altnames.sh +++ b/tools/testing/selftests/net/altnames.sh @@ -45,7 +45,7 @@ altnames_test() check_err $? "Got unexpected long alternative name from link show JSON" ip link property del $DUMMY_DEV altname $SHORT_NAME - check_err $? "Failed to add short alternative name" + check_err $? "Failed to delete short alternative name" ip -j -p link show $SHORT_NAME &>/dev/null check_fail $? "Unexpected success while trying to do link show with deleted short alternative name" diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S index bd1ca25febe4c..aed632d29fff5 100644 --- a/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S +++ b/tools/testing/selftests/powerpc/tm/tm-syscall-asm.S @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include +#include #include .text @@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended) 1: li r3, -1 blr + + +.macro scv level + .long (0x44000001 | (\level) << 5) +.endm + +FUNC_START(getppid_scv_tm_active) + PUSH_BASIC_STACK(0) + tbegin. + beq 1f + li r0, __NR_getppid + scv 0 + tend. + POP_BASIC_STACK(0) + blr +1: + li r3, -1 + POP_BASIC_STACK(0) + blr + +FUNC_START(getppid_scv_tm_suspended) + PUSH_BASIC_STACK(0) + tbegin. + beq 1f + li r0, __NR_getppid + tsuspend. + scv 0 + tresume. + tend. 
+ POP_BASIC_STACK(0) + blr +1: + li r3, -1 + POP_BASIC_STACK(0) + blr diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c index 467a6b3134b2a..b763354c2eb47 100644 --- a/tools/testing/selftests/powerpc/tm/tm-syscall.c +++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c @@ -19,23 +19,36 @@ #include "utils.h" #include "tm.h" +#ifndef PPC_FEATURE2_SCV +#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */ +#endif + extern int getppid_tm_active(void); extern int getppid_tm_suspended(void); +extern int getppid_scv_tm_active(void); +extern int getppid_scv_tm_suspended(void); unsigned retries = 0; #define TEST_DURATION 10 /* seconds */ -pid_t getppid_tm(bool suspend) +pid_t getppid_tm(bool scv, bool suspend) { int i; pid_t pid; for (i = 0; i < TM_RETRIES; i++) { - if (suspend) - pid = getppid_tm_suspended(); - else - pid = getppid_tm_active(); + if (suspend) { + if (scv) + pid = getppid_scv_tm_suspended(); + else + pid = getppid_tm_suspended(); + } else { + if (scv) + pid = getppid_scv_tm_active(); + else + pid = getppid_tm_active(); + } if (pid >= 0) return pid; @@ -82,15 +95,24 @@ int tm_syscall(void) * Test a syscall within a suspended transaction and verify * that it succeeds. */ - FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */ + FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */ /* * Test a syscall within an active transaction and verify that * it fails with the correct failure code. */ - FAIL_IF(getppid_tm(false) != -1); /* Should fail... */ + FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */ FAIL_IF(!failure_is_persistent()); /* ...persistently... */ FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */ + + /* Now do it all again with scv if it is available. */ + if (have_hwcap2(PPC_FEATURE2_SCV)) { + FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */ + FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */ + FAIL_IF(!failure_is_persistent()); /* ...persistently... */ + FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */ + } + gettimeofday(&now, 0); }
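
A note on the tools/include/linux/compiler-gcc.h hunk: __compiletime_error() is now defined only when the compiler reports support for the error attribute, so older GCC versions no longer trip over it. The sketch below pairs that guard with a no-op fallback; the fallback is an assumption for illustration (the kernel proper carries one in include/linux/compiler_types.h) and is not part of this patch. It also assumes a compiler that implements __has_attribute (GCC >= 5 or clang):

/* Define the attribute only where the compiler supports it... */
#if __has_attribute(__error__)
# define __compiletime_error(message) __attribute__((error(message)))
#else
/* ...and fall back to a no-op elsewhere (assumed, not from this patch). */
# define __compiletime_error(message)
#endif

/* Any call to this function that survives compilation now fails the
 * build with the given message when the attribute is available. */
void never_call_me(void) __compiletime_error("do not call never_call_me()");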
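
On the tools/lib/perf/evsel.c changes: FD() previously expanded to a dereference of xyarray__entry(), which crashes if evsel->fd was never allocated; it now yields the slot pointer and every caller checks for NULL first. A minimal, compilable sketch of that idiom, in which the table type and entry() helper are simplified stand-ins for xyarray/xyarray__entry() rather than the real libperf API:

#include <stdio.h>
#include <stdlib.h>

struct table {
	int max_x, max_y;
	int *slots;		/* may be NULL if never allocated */
};

/* Stand-in for xyarray__entry(): return a pointer to the slot, or NULL. */
static int *entry(struct table *t, int x, int y)
{
	if (!t->slots || x >= t->max_x || y >= t->max_y)
		return NULL;
	return &t->slots[x * t->max_y + y];
}

int main(void)
{
	struct table t = { .max_x = 2, .max_y = 3 };
	int x, y;

	t.slots = calloc(t.max_x * t.max_y, sizeof(int));

	for (x = 0; x < t.max_x; x++) {
		for (y = 0; y < t.max_y; y++) {
			int *slot = entry(&t, x, y);

			/* Check the pointer before use, as the patched
			 * perf_evsel__alloc_fd() and friends now do. */
			if (slot)
				*slot = -1;
		}
	}

	free(t.slots);
	return 0;
}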
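
The cgroupv1 helpers added to tools/testing/selftests/bpf/cgroup_helpers.c are meant to be called in a fixed order, which test_cgroup_v1v2() follows: prepare the net_cls hierarchy, write the classid, join the work directory, then clean up. A usage sketch, assuming it is built inside the selftests tree (so cgroup_helpers.h and cgroup_helpers.c are available) and run as root, since it mounts cgroups:

#include <stdio.h>

#include "cgroup_helpers.h"

int main(void)
{
	int err = 1;

	/* Mount the net_cls hierarchy (if needed) and create the work dir. */
	if (setup_classid_environment())
		return 1;

	/* Tag sockets of processes in the work dir with classid 42. */
	if (set_classid(42))
		goto out;

	/* Move this process into the work dir so its sockets get tagged. */
	if (join_classid())
		goto out;

	printf("new sockets now carry net_cls.classid 42\n");
	err = 0;
out:
	/* Return to the root cgroup and delete the work directory. */
	cleanup_classid_environment();
	return err;
}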
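
The must_fail flag added to network_helpers encodes how a rejecting cgroup/connect4 program is observed from userspace: connect() returns -1 with errno set to EPERM. A standalone sketch of that check; connect_expect_reject() is a hypothetical name for illustration, not a helper from this patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Returns 0 when connect() fails with EPERM, as it should while a
 * cgroup/connect4 program returns the reject verdict for this address. */
int connect_expect_reject(int fd, const struct sockaddr *addr, socklen_t len)
{
	errno = 0;
	if (!connect(fd, addr, len)) {
		fprintf(stderr, "unexpected connect() success\n");
		return -1;
	}
	if (errno != EPERM) {
		fprintf(stderr, "unexpected errno: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}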
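
The scv macro in tm-syscall-asm.S hand-assembles the instruction word so the test still builds with assemblers that do not know scv; the C snippet below merely mirrors that arithmetic to show what gets emitted:

#include <stdio.h>

/* Same arithmetic as ".long (0x44000001 | (\level) << 5)" in the .S file. */
static unsigned int scv_word(unsigned int level)
{
	return 0x44000001u | (level << 5);
}

int main(void)
{
	printf("scv 0 -> 0x%08x\n", scv_word(0));	/* 0x44000001 */
	printf("scv 1 -> 0x%08x\n", scv_word(1));	/* 0x44000021 */
	return 0;
}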