390 files changed, 3580 insertions, 2033 deletions
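A note on the cpufreq-cpu0 binding change near the top of this diff: the operating-points property moves from required to optional because OPPs may instead be populated at runtime by platform code. A minimal sketch of such runtime registration, assuming the dev_pm_opp_add() API from <linux/pm_opp.h> of this kernel era; the board-file function name and the frequency/voltage pairs are made up for illustration:

/*
 * Hypothetical sketch only: register OPPs from platform code when the
 * DT node has no operating-points property, as the updated
 * cpufreq-cpu0 binding below allows. Frequencies (Hz) and voltages
 * (uV) are invented values, not taken from this diff.
 */
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_register_opps(struct device *cpu_dev)
{
	int ret;

	/* dev_pm_opp_add(dev, freq_hz, u_volt) */
	ret = dev_pm_opp_add(cpu_dev, 396000000, 1100000);
	if (ret)
		return ret;

	return dev_pm_opp_add(cpu_dev, 792000000, 1250000);
}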
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index fd786ea13a1f..e182be5e3c83 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt @@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and configuring GPIOs it can get its ACPI handle and extract this information from ACPI tables. -Currently the kernel is not able to automatically determine from which ACPI -device it should make the corresponding platform device so we need to add -the ACPI device explicitly to acpi_platform_device_ids list defined in -drivers/acpi/acpi_platform.c. This limitation is only for the platform -devices, SPI and I2C devices are created automatically as described below. - DMA support ~~~~~~~~~~~ DMA controllers enumerated via ACPI should be registered in the system to diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt index f055515d2b62..366690cb86a3 100644 --- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt +++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt @@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined under node /cpus/cpu@0. Required properties: -- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt - for details +- None Optional properties: +- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for + details. OPPs *must* be supplied either via DT, i.e. this property, or + populated at runtime. - clock-latency: Specify the possible maximum transition latency for clock, in unit of nanoseconds. - voltage-tolerance: Specify the CPU voltage tolerance in percentage. diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt index f1ea2c69648d..c587a966413e 100644 --- a/Documentation/input/event-codes.txt +++ b/Documentation/input/event-codes.txt @@ -281,6 +281,19 @@ gestures can normally be extracted from it. If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT device. +INPUT_PROP_TOPBUTTONPAD: +----------------------- +Some laptops, most notably the Lenovo *40 series provide a trackstick +device but do not have physical buttons associated with the trackstick +device. Instead, the top area of the touchpad is marked to show +visual/haptic areas for left, middle, right buttons intended to be used +with the trackstick. + +If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons +accordingly. This property does not affect kernel behavior. +The kernel does not provide button emulation for such devices but treats +them as any other INPUT_PROP_BUTTONPAD device. + Guidelines: ========== The guidelines below ensure proper single-touch and multi-finger functionality. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c1b9aa8c5a52..b7fa2f599459 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. leaf rcu_node structure. Useful for very large systems. + rcutree.jiffies_till_sched_qs= [KNL] + Set required age in jiffies for a + given grace period before RCU starts + soliciting quiescent-state help from + rcu_note_context_switch(). + rcutree.jiffies_till_first_fqs= [KNL] Set delay from grace-period initialization to first attempt to force quiescent states. @@ -3526,7 +3532,7 @@ bytes respectively. 
Such letter suffixes can also be entirely omitted. the allocated input device; If set to 0, video driver will only send out the event without touching backlight brightness level. - default: 0 + default: 1 virtio_mmio.device= [VMMIO] Memory mapped virtio (platform) device. diff --git a/MAINTAINERS b/MAINTAINERS index e31c87474739..86efa7e213c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c 8169 10/100/1000 GIGABIT ETHERNET DRIVER M: Realtek linux nic maintainers <nic_swsd@realtek.com> -M: Francois Romieu <romieu@fr.zoreil.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/realtek/r8169.c @@ -4511,8 +4510,7 @@ S: Supported F: drivers/idle/i7300_idle.c IEEE 802.15.4 SUBSYSTEM -M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com> -M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> +M: Alexander Aring <alex.aring@gmail.com> L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://apps.sourceforge.net/trac/linux-zigbee T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git @@ -6958,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/pinctrl/pinctrl-at91.c +PIN CONTROLLER - RENESAS +M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> +L: linux-sh@vger.kernel.org +S: Maintained +F: drivers/pinctrl/sh-pfc/ + PIN CONTROLLER - SAMSUNG M: Tomasz Figa <t.figa@samsung.com> M: Thomas Abraham <thomas.abraham@linaro.org> @@ -8021,6 +8025,16 @@ F: drivers/ata/ F: include/linux/ata.h F: include/linux/libata.h +SERIAL ATA AHCI PLATFORM devices support +M: Hans de Goede <hdegoede@redhat.com> +M: Tejun Heo <tj@kernel.org> +L: linux-ide@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git +S: Supported +F: drivers/ata/ahci_platform.c +F: drivers/ata/libahci_platform.c +F: include/linux/ahci_platform.h + SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com> L: linux-scsi@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc7 NAME = Shuffling Zombie Juror # *DOCUMENTATION* @@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer endif endif +KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments) + ifdef CONFIG_DEBUG_INFO KBUILD_CFLAGS += -g KBUILD_AFLAGS += -Wa,-gdwarf-2 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 245058b3b0ef..88acf8bc1490 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -6,6 +6,7 @@ config ARM select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index 287795985e32..b84bac5bada4 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -925,7 +925,7 @@ compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00500000 0x00100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, + clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; status = "disabled"; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 2ebc42140ea6..2c0d6ea3ab41 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ 
b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -1124,6 +1124,7 @@ compatible = "atmel,at91sam9rl-pwm"; reg = <0xf8034000 0x300>; interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>; + clocks = <&pwm_clk>; #pwm-cells = <3>; status = "disabled"; }; @@ -1155,8 +1156,7 @@ compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00600000 0x100000>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; - clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, - <&uhpck>; + clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; status = "disabled"; }; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 9d853189028b..e35d880f9773 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid) cpu_topology[cpuid].socket_id, mpidr); } -static inline const int cpu_corepower_flags(void) +static inline int cpu_corepower_flags(void) { return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; } diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c index 8a134d019cb3..920a4baa53cd 100644 --- a/arch/arm/mach-exynos/hotplug.c +++ b/arch/arm/mach-exynos/hotplug.c @@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void) static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + for (;;) { - /* make cpu1 to be turned off at next WFI command */ - if (cpu == 1) - exynos_cpu_power_down(cpu); + /* Turn the CPU off on next WFI instruction. */ + exynos_cpu_power_down(core_id); wfi(); - if (pen_release == cpu_logical_map(cpu)) { + if (pen_release == core_id) { /* * OK, proper wakeup, we're done */ diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 1c8d31e39520..50b9aad5e27b 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu) static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned long timeout; - unsigned long phys_cpu = cpu_logical_map(cpu); + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); int ret = -ENOSYS; /* @@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * the holding pen - release it, then wait for it to flag * that it has been released by resetting pen_release. * - * Note that "pen_release" is the hardware CPU ID, whereas + * Note that "pen_release" is the hardware CPU core ID, whereas * "cpu" is Linux's internal ID. */ - write_pen_release(phys_cpu); + write_pen_release(core_id); - if (!exynos_cpu_power_state(cpu)) { - exynos_cpu_power_up(cpu); + if (!exynos_cpu_power_state(core_id)) { + exynos_cpu_power_up(core_id); timeout = 10; /* wait max 10 ms until cpu1 is on */ - while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) { + while (exynos_cpu_power_state(core_id) + != S5P_CORE_LOCAL_PWR_EN) { if (timeout-- == 0) break; @@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * Try to set boot address using firmware first * and fall back to boot register if it fails. 
*/ - ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); + ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr); if (ret && ret != -ENOSYS) goto fail; if (ret == -ENOSYS) { - void __iomem *boot_reg = cpu_boot_reg(phys_cpu); + void __iomem *boot_reg = cpu_boot_reg(core_id); if (IS_ERR(boot_reg)) { ret = PTR_ERR(boot_reg); goto fail; } - __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); + __raw_writel(boot_addr, cpu_boot_reg(core_id)); } - call_firmware_op(cpu_boot, phys_cpu); + call_firmware_op(cpu_boot, core_id); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); @@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus) * boot register if it fails. */ for (i = 1; i < max_cpus; ++i) { - unsigned long phys_cpu; unsigned long boot_addr; + u32 mpidr; + u32 core_id; int ret; - phys_cpu = cpu_logical_map(i); + mpidr = cpu_logical_map(i); + core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); boot_addr = virt_to_phys(exynos4_secondary_startup); - ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); + ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr); if (ret && ret != -ENOSYS) break; if (ret == -ENOSYS) { - void __iomem *boot_reg = cpu_boot_reg(phys_cpu); + void __iomem *boot_reg = cpu_boot_reg(core_id); if (IS_ERR(boot_reg)) break; - __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); + __raw_writel(boot_addr, cpu_boot_reg(core_id)); } } } diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 8e795dea02ec..8556c787e59c 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", }; static const char *lvds_sels[] = { "dummy", "dummy", "dummy", "dummy", "dummy", "dummy", "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref", - "pcie_ref", "sata_ref", + "pcie_ref_125m", "sata_ref_100m", }; enum mx6q_clks { @@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) /* All existing boards with PCIe use LVDS1 */ if (IS_ENABLED(CONFIG_PCI_IMX6)) - clk_set_parent(clk[lvds1_sel], clk[sata_ref]); + clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]); /* Set initial power mode */ imx6q_set_lpm(WAIT_CLOCKED); diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 477202fd39cc..2bdc3233abe2 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = { .notifier_call = mvebu_hwcc_notifier, }; +static struct notifier_block mvebu_hwcc_pci_nb = { + .notifier_call = mvebu_hwcc_notifier, +}; + static void __init armada_370_coherency_init(struct device_node *np) { struct resource res; @@ -427,7 +431,7 @@ static int __init coherency_pci_init(void) { if (coherency_available()) bus_register_notifier(&pci_bus_type, - &mvebu_hwcc_nb); + &mvebu_hwcc_pci_nb); return 0; } diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S index 5925366bc03c..da5bb292b91c 100644 --- a/arch/arm/mach-mvebu/headsmp-a9.S +++ b/arch/arm/mach-mvebu/headsmp-a9.S @@ -15,6 +15,8 @@ #include <linux/linkage.h> #include <linux/init.h> +#include <asm/assembler.h> + __CPUINIT #define CPU_RESUME_ADDR_REG 0xf10182d4 @@ -22,13 +24,18 @@ .global armada_375_smp_cpu1_enable_code_end armada_375_smp_cpu1_enable_code_start: - ldr r0, [pc, #4] +ARM_BE8(setend be) + adr r0, 1f + ldr r0, [r0] ldr r1, [r0] +ARM_BE8(rev r1, r1) mov pc, r1 +1: .word CPU_RESUME_ADDR_REG armada_375_smp_cpu1_enable_code_end: 
ENTRY(mvebu_cortex_a9_secondary_startup) +ARM_BE8(setend be) bl v7_invalidate_l1 b secondary_startup ENDPROC(mvebu_cortex_a9_secondary_startup) diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index a1d407c0febe..25aa8237d668 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle) /* Test the CR_C bit and set it if it was cleared */ asm volatile( - "mrc p15, 0, %0, c1, c0, 0 \n\t" - "tst %0, #(1 << 2) \n\t" - "orreq %0, %0, #(1 << 2) \n\t" - "mcreq p15, 0, %0, c1, c0, 0 \n\t" + "mrc p15, 0, r0, c1, c0, 0 \n\t" + "tst r0, #(1 << 2) \n\t" + "orreq r0, r0, #(1 << 2) \n\t" + "mcreq p15, 0, r0, c1, c0, 0 \n\t" "isb " - : : "r" (0)); + : : : "r0"); pr_warn("Failed to suspend the system\n"); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a474de346be6..839f48c26ef0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -4,6 +4,7 @@ config ARM64 select ARCH_HAS_OPP select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_FRAME_POINTERS diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c index 60e98a639ac5..e786e6cdc400 100644 --- a/arch/arm64/kernel/efi-stub.c +++ b/arch/arm64/kernel/efi-stub.c @@ -12,8 +12,6 @@ #include <linux/efi.h> #include <linux/libfdt.h> #include <asm/sections.h> -#include <generated/compile.h> -#include <generated/utsrelease.h> /* * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f43db8a69262..e90c5426fe14 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -60,6 +60,17 @@ static int __init early_initrd(char *p) early_param("initrd", early_initrd); #endif +/* + * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It + * currently assumes that for memory starting above 4G, 32-bit devices will + * use a DMA offset. 
+ */ +static phys_addr_t max_zone_dma_phys(void) +{ + phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32); + return min(offset + (1ULL << 32), memblock_end_of_DRAM()); +} + static void __init zone_sizes_init(unsigned long min, unsigned long max) { struct memblock_region *reg; @@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) { - unsigned long max_dma_phys = - (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1); - max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT)); + max_dma = PFN_DOWN(max_zone_dma_phys()); zone_size[ZONE_DMA] = max_dma - min; } zone_size[ZONE_NORMAL] = max - max_dma; @@ -146,7 +155,7 @@ void __init arm64_memblock_init(void) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) - dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1; + dma_phys_limit = max_zone_dma_phys(); dma_contiguous_reserve(dma_phys_limit); memblock_allow_resize(); diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig index a7e9bfd84183..fcec5ce71392 100644 --- a/arch/blackfin/configs/BF609-EZKIT_defconfig +++ b/arch/blackfin/configs/BF609-EZKIT_defconfig @@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_BLACKFIN_TWI=y CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 CONFIG_SPI=y -CONFIG_SPI_BFIN_V3=y +CONFIG_SPI_ADI_V3=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index ba35864b2b74..c9eec84aa258 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -145,7 +145,7 @@ SECTIONS .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) #else - .init.data : AT(__data_lma + __data_len) + .init.data : AT(__data_lma + __data_len + 32) { __sinitdata = .; INIT_DATA diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 63b0e4fe760c..0ccf0cf4daaf 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c @@ -20,6 +20,7 @@ #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/irq.h> +#include <linux/gpio.h> #include <linux/i2c.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index c65c6dbda3da..1e7290ef3525 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c @@ -21,6 +21,7 @@ #endif #include <linux/ata_platform.h> #include <linux/irq.h> +#include <linux/gpio.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index af58454b4bff..c7495dc74690 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c @@ -21,6 +21,7 @@ #endif #include <linux/ata_platform.h> #include <linux/irq.h> +#include <linux/gpio.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index a0211225748d..6b988ad653d8 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -21,6 +21,7 @@ #endif #include <linux/ata_platform.h> #include 
<linux/irq.h> +#include <linux/gpio.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 90138e6112c1..1fe7ff286619 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), - PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"), + PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), @@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), #endif PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), - PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"), + PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"), + PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"), + PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"), }; static int __init ezkit_init(void) diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index 430b16d5ccb1..6ab951534d79 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c @@ -44,6 +44,7 @@ #include <linux/spi/flash.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/gpio.h> #include <linux/jiffies.h> #include <linux/i2c-pca-platform.h> #include <linux/delay.h> diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 9f777df4cacc..e862f7823e68 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -18,6 +18,7 @@ #endif #include <linux/ata_platform.h> #include <linux/irq.h> +#include <linux/gpio.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 88dee43e7abe..2de71e8c104b 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -14,6 +14,7 @@ #include <linux/spi/spi.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/gpio.h> #include <linux/delay.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 1ba4600de69f..e2c0b024ce88 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev) { #define CONFIG_SMC_GCTL_VAL 0x00000010 - if (!devm_pinctrl_get_select_default(&pdev->dev)) - return -EBUSY; bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); bfin_write32(SMC_B0CTL, 0x01002011); bfin_write32(SMC_B0TIM, 0x08170977); @@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev) void 
bf609_nor_flash_exit(struct platform_device *pdev) { - devm_pinctrl_put(pdev->dev.pins->p); bfin_write32(SMC_GCTL, 0); } @@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), - PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), - PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), -#if IS_ENABLED(CONFIG_VIDEO_MT9M114) - PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), -#elif IS_ENABLED(CONFIG_VIDEO_VS6624) - PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), -#else - PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), -#endif + PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), + PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), + PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), + PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"), + PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"), + PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h index 3ca0fb965636..a1efd936dd30 100644 --- a/arch/blackfin/mach-bf609/include/mach/pm.h +++ b/arch/blackfin/mach-bf609/include/mach/pm.h @@ -10,6 +10,7 @@ #define __MACH_BF609_PM_H__ #include <linux/suspend.h> +#include <linux/platform_device.h> extern int bfin609_pm_enter(suspend_state_t state); extern int bf609_pm_prepare(void); @@ -19,6 +20,6 @@ void bf609_hibernate(void); void bfin_sec_raise_irq(unsigned int sid); void coreb_enable(void); -int bf609_nor_flash_init(void); -void bf609_nor_flash_exit(void); +int bf609_nor_flash_init(struct platform_device *pdev); +void bf609_nor_flash_exit(struct platform_device *pdev); #endif diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c index 0cdd6955c7be..b1bfcf434d16 100644 --- a/arch/blackfin/mach-bf609/pm.c +++ b/arch/blackfin/mach-bf609/pm.c @@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = { #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static int smc_pm_syscore_suspend(void) { - bf609_nor_flash_exit(); + bf609_nor_flash_exit(NULL); return 0; } static void smc_pm_syscore_resume(void) { - bf609_nor_flash_init(); + bf609_nor_flash_init(NULL); } static struct syscore_ops smc_pm_syscore_ops = { diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 867b7cef204c..1f94784eab6d 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -1208,8 +1208,6 @@ int __init init_arch_irq(void) bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); - 
bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); - /* Enable interrupts IVG7-15 */ bfin_irq_flags |= IMASK_IVG15 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index a2fa297196bc..f5645d6a89f2 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -69,8 +69,6 @@ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* obsolete -- ignored */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ae085ad0fba0..0bef864264c0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -728,7 +728,6 @@ static void __init pagetable_init(void) #endif empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); - memset(empty_zero_page, 0, PAGE_SIZE); } static void __init gateway_init(void) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fefe7c8bf05f..80b94b0add1f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -145,6 +145,7 @@ config PPC select HAVE_IRQ_EXIT_ON_IRQ_STACK select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select HAVE_ARCH_AUDITSYSCALL + select ARCH_SUPPORTS_ATOMIC_RMW config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index bc2347774f0a..0fdd7eece6d9 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -447,6 +447,7 @@ extern const char *powerpc_base_platform; CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) +#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index fddb72b48ce9..d645428a65a4 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, return rb; } -static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) +static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, + bool is_base_size) { + int size, a_psize; /* Look at the 8 bit LP value */ unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); @@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) continue; a_psize = __hpte_actual_psize(lp, size); - if (a_psize != -1) + if (a_psize != -1) { + if (is_base_size) + return 1ul << mmu_psize_defs[size].shift; return 1ul << mmu_psize_defs[a_psize].shift; + } } } return 0; } +static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) +{ + return __hpte_page_size(h, l, 0); +} + +static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) +{ + return __hpte_page_size(h, l, 1); +} + static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) { return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 807014dde821..c2b4dcf23d03 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -22,6 +22,7 @@ */ 
#include <asm/pgtable-ppc64.h> #include <asm/bug.h> +#include <asm/processor.h> /* * Segment table @@ -496,7 +497,7 @@ extern void slb_set_size(u16 size); */ struct subpage_prot_table { unsigned long maxaddr; /* only addresses < this are protected */ - unsigned int **protptrs[2]; + unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)]; unsigned int *low_prot[4]; }; diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 9ea266eae33e..7e4612528546 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -277,6 +277,8 @@ n: .globl n; \ n: +#define _GLOBAL_TOC(name) _GLOBAL(name) + #define _KPROBE(n) \ .section ".kprobes.text","a"; \ .globl n; \ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 965291b4c2fa..0c157642c2a1 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = { .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, + { /* Power8 DD1: Does not support doorbell IPIs */ + .pvr_mask = 0xffffff00, + .pvr_value = 0x004d0100, + .cpu_name = "POWER8 (raw)", + .cpu_features = CPU_FTRS_POWER8_DD1, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .oprofile_cpu_type = "ppc64/power8", + .oprofile_type = PPC_OPROFILE_INVALID, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .flush_tlb = __flush_tlb_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, { /* Power8 */ .pvr_mask = 0xffff0000, .pvr_value = 0x004d0000, diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 51a3ff78838a..1007fb802e6b 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier) #ifdef CONFIG_SCHED_SMT /* cpumask of CPUs with asymetric SMT dependancy */ -static const int powerpc_smt_flags(void) +static int powerpc_smt_flags(void) { int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 80561074078d..68468d695f12 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, goto out; } if (!rma_setup && is_vrma_hpte(v)) { - unsigned long psize = hpte_page_size(v, r); + unsigned long psize = hpte_base_page_size(v, r); unsigned long senc = slb_pgsize_encoding(psize); unsigned long lpcr; diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 6e6224318c36..5a24d3c2b6b8 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, r = hpte[i+1]; /* - * Check the HPTE again, including large page size - * Since we don't currently allow any MPSS (mixed - * page-size segment) page sizes, it is sufficient - * to check against the actual page size. 
+ * Check the HPTE again, including base page size */ if ((v & valid) && (v & mask) == val && - hpte_page_size(v, r) == (1ul << pshift)) + hpte_base_page_size(v, r) == (1ul << pshift)) /* Return with the HPTE still locked */ return (hash << 3) + (i >> 1); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 868347ef09fd..558a67df8126 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -48,7 +48,7 @@ * * LR = return address to continue at after eventually re-enabling MMU */ -_GLOBAL(kvmppc_hv_entry_trampoline) +_GLOBAL_TOC(kvmppc_hv_entry_trampoline) mflr r0 std r0, PPC_LR_STKOFF(r1) stdu r1, -112(r1) diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index e2c29e381dc7..d044b8b7c69d 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -25,7 +25,11 @@ #include <asm/exception-64s.h> #if defined(CONFIG_PPC_BOOK3S_64) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define FUNC(name) name +#else #define FUNC(name) GLUE(.,name) +#endif #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU #elif defined(CONFIG_PPC_BOOK3S_32) diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 9eec675220e6..16c4d88ba27d 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -36,7 +36,11 @@ #if defined(CONFIG_PPC_BOOK3S_64) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define FUNC(name) name +#else #define FUNC(name) GLUE(.,name) +#endif #elif defined(CONFIG_PPC_BOOK3S_32) @@ -146,7 +150,7 @@ kvmppc_handler_skip_ins: * On entry, r4 contains the guest shadow MSR * MSR.EE has to be 0 when calling this function */ -_GLOBAL(kvmppc_entry_trampoline) +_GLOBAL_TOC(kvmppc_entry_trampoline) mfmsr r5 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) toreal(r7) diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index edb14ba992b3..ef27fbd5d9c5 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) u32 irq, server, priority; int rc; - if (args->nargs != 3 || args->nret != 1) { + if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } - irq = args->args[0]; - server = args->args[1]; - priority = args->args[2]; + irq = be32_to_cpu(args->args[0]); + server = be32_to_cpu(args->args[1]); + priority = be32_to_cpu(args->args[2]); rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); if (rc) rc = -3; out: - args->rets[0] = rc; + args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) @@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) u32 irq, server, priority; int rc; - if (args->nargs != 1 || args->nret != 3) { + if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) { rc = -3; goto out; } - irq = args->args[0]; + irq = be32_to_cpu(args->args[0]); server = priority = 0; rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); @@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) goto out; } - args->rets[1] = server; - args->rets[2] = priority; + args->rets[1] = cpu_to_be32(server); + args->rets[2] = cpu_to_be32(priority); out: - args->rets[0] = rc; + args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_int_off(struct kvm_vcpu 
*vcpu, struct rtas_args *args) @@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) u32 irq; int rc; - if (args->nargs != 1 || args->nret != 1) { + if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } - irq = args->args[0]; + irq = be32_to_cpu(args->args[0]); rc = kvmppc_xics_int_off(vcpu->kvm, irq); if (rc) rc = -3; out: - args->rets[0] = rc; + args->rets[0] = cpu_to_be32(rc); } static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) @@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) u32 irq; int rc; - if (args->nargs != 1 || args->nret != 1) { + if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { rc = -3; goto out; } - irq = args->args[0]; + irq = be32_to_cpu(args->args[0]); rc = kvmppc_xics_int_on(vcpu->kvm, irq); if (rc) rc = -3; out: - args->rets[0] = rc; + args->rets[0] = cpu_to_be32(rc); } #endif /* CONFIG_KVM_XICS */ @@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) return rc; } -static void kvmppc_rtas_swap_endian_in(struct rtas_args *args) -{ -#ifdef __LITTLE_ENDIAN__ - int i; - - args->token = be32_to_cpu(args->token); - args->nargs = be32_to_cpu(args->nargs); - args->nret = be32_to_cpu(args->nret); - for (i = 0; i < args->nargs; i++) - args->args[i] = be32_to_cpu(args->args[i]); -#endif -} - -static void kvmppc_rtas_swap_endian_out(struct rtas_args *args) -{ -#ifdef __LITTLE_ENDIAN__ - int i; - - for (i = 0; i < args->nret; i++) - args->args[i] = cpu_to_be32(args->args[i]); - args->token = cpu_to_be32(args->token); - args->nargs = cpu_to_be32(args->nargs); - args->nret = cpu_to_be32(args->nret); -#endif -} - int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) { struct rtas_token_definition *d; @@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) if (rc) goto fail; - kvmppc_rtas_swap_endian_in(&args); - /* * args->rets is a pointer into args->args. Now that we've * copied args we need to fix it up to point into our copy, @@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) * value so we can restore it on the way out. 
*/ orig_rets = args.rets; - args.rets = &args.args[args.nargs]; + args.rets = &args.args[be32_to_cpu(args.nargs)]; mutex_lock(&vcpu->kvm->lock); rc = -ENOENT; list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { - if (d->token == args.token) { + if (d->token == be32_to_cpu(args.token)) { d->handler->handler(vcpu, &args); rc = 0; break; @@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) if (rc == 0) { args.rets = orig_rets; - kvmppc_rtas_swap_endian_out(&args); rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); if (rc) goto fail; diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index dd2cc03f406f..86903d3f5a03 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, if (printk_ratelimit()) pr_err("%s: pte not present: gfn %lx, pfn %lx\n", __func__, (long)gfn, pfn); - return -EINVAL; + ret = -EINVAL; + goto out; } kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index 0738f96befbf..43435c6892fb 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -77,7 +77,7 @@ _GLOBAL(memset) stb r4,0(r6) blr -_GLOBAL(memmove) +_GLOBAL_TOC(memmove) cmplw 0,r3,r4 bgt backwards_memcpy b memcpy diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 412dd46dd0b7..5c09f365c842 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) sh = regs->gpr[rb] & 0x3f; ival = (signed int) regs->gpr[rd]; regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); - if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) + if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; @@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) sh = rb; ival = (signed int) regs->gpr[rd]; regs->gpr[ra] = ival >> sh; - if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) + if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; @@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #ifdef __powerpc64__ case 27: /* sld */ - sh = regs->gpr[rd] & 0x7f; + sh = regs->gpr[rb] & 0x7f; if (sh < 64) regs->gpr[ra] = regs->gpr[rd] << sh; else @@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) sh = regs->gpr[rb] & 0x7f; ival = (signed long int) regs->gpr[rd]; regs->gpr[ra] = ival >> (sh < 64 ? 
sh : 63); - if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) + if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; @@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) sh = rb | ((instr & 2) << 4); ival = (signed long int) regs->gpr[rd]; regs->gpr[ra] = ival >> sh; - if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) + if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) regs->xer |= XER_CA; else regs->xer &= ~XER_CA; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 6dcdadefd8d0..82e82cadcde5 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, case BPF_ANC | SKF_AD_VLAN_TAG: case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); + BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, vlan_tci)); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - PPC_ANDI(r_A, r_A, VLAN_VID_MASK); - else + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { + PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT); + } else { PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); + PPC_SRWI(r_A, r_A, 12); + } break; case BPF_ANC | SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 022b38e6a80b..2d0b4d68a40a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa, } of_node_set_flag(dn, OF_DYNAMIC); + of_node_init(dn); return dn; } diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 0435bb65d0aa..1c0a60d98867 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist np->properties = proplist; of_node_set_flag(np, OF_DYNAMIC); + of_node_init(np); np->parent = derive_parent(path); if (IS_ERR(np->parent)) { diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index df38c70cd59e..18ea9e3f8142 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc) return 0; asm volatile( - "0: lfpc %1\n" - " la %0,0\n" + " lfpc %1\n" + "0: la %0,0\n" "1:\n" EX_TABLE(0b,1b) : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 7ba7d6784510..e88d35d74950 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -437,11 +437,11 @@ ENTRY(startup_kdump) #if defined(CONFIG_64BIT) #if defined(CONFIG_MARCH_ZEC12) - .long 3, 0xc100efea, 0xf46ce800, 0x00400000 + .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 #elif defined(CONFIG_MARCH_Z196) - .long 2, 0xc100efea, 0xf46c0000 + .long 2, 0xc100eff2, 0xf46c0000 #elif defined(CONFIG_MARCH_Z10) - .long 2, 0xc100efea, 0xf0680000 + .long 2, 0xc100eff2, 0xf0680000 #elif defined(CONFIG_MARCH_Z9_109) .long 1, 0xc100efc2 #elif defined(CONFIG_MARCH_Z990) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 2d716734b5b1..5dc7ad9e2fbf 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t 
data) unsigned long mask = PSW_MASK_USER; mask |= is_ri_task(child) ? PSW_MASK_RI : 0; - if ((data & ~mask) != PSW_USER_BITS) + if ((data ^ PSW_USER_BITS) & ~mask) + /* Invalid psw mask. */ + return -EINVAL; + if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) + /* Invalid address-space-control bits */ return -EINVAL; if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) + /* Invalid addressing mode bits */ return -EINVAL; } *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; @@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child, mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ - if ((tmp & ~mask) != PSW32_USER_BITS) + if ((tmp ^ PSW32_USER_BITS) & ~mask) /* Invalid psw mask. */ return -EINVAL; + if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) + /* Invalid address-space-control bits */ + return -EINVAL; regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (regs->psw.mask & PSW_MASK_BA) | (__u64)(tmp & mask) << 32; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 9ddc51eeb8d6..30de42730b2f 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -48,13 +48,10 @@ static LIST_HEAD(zpci_list); static DEFINE_SPINLOCK(zpci_list_lock); -static void zpci_enable_irq(struct irq_data *data); -static void zpci_disable_irq(struct irq_data *data); - static struct irq_chip zpci_irq_chip = { .name = "zPCI", - .irq_unmask = zpci_enable_irq, - .irq_mask = zpci_disable_irq, + .irq_unmask = unmask_msi_irq, + .irq_mask = mask_msi_irq, }; static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); @@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) return rc; } -static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag) -{ - int offset, pos; - u32 mask_bits; - - if (msi->msi_attrib.is_msix) { - offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; - msi->masked = readl(msi->mask_base + offset); - writel(flag, msi->mask_base + offset); - } else if (msi->msi_attrib.maskbit) { - pos = (long) msi->mask_base; - pci_read_config_dword(msi->dev, pos, &mask_bits); - mask_bits &= ~(mask); - mask_bits |= flag & mask; - pci_write_config_dword(msi->dev, pos, mask_bits); - } else - return 0; - - msi->msi_attrib.maskbit = !!flag; - return 1; -} - -static void zpci_enable_irq(struct irq_data *data) -{ - struct msi_desc *msi = irq_get_msi_desc(data->irq); - - zpci_msi_set_mask_bits(msi, 1, 0); -} - -static void zpci_disable_irq(struct irq_data *data) -{ - struct msi_desc *msi = irq_get_msi_desc(data->irq); - - zpci_msi_set_mask_bits(msi, 1, 1); -} - void pcibios_fixup_bus(struct pci_bus *bus) { } @@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) /* Release MSI interrupts */ list_for_each_entry(msi, &pdev->msi_list, list) { - zpci_msi_set_mask_bits(msi, 1, 1); + if (msi->msi_attrib.is_msix) + default_msix_mask_irq(msi, 1); + else + default_msi_mask_irq(msi, 1, 1); irq_set_msi_desc(msi->irq, NULL); irq_free_desc(msi->irq); msi->msg.address_lo = 0; diff --git a/arch/sh/Makefile b/arch/sh/Makefile index d4d16e4be07c..bf5b3f5f4962 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -32,7 +32,8 @@ endif cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ - $(call cc-option,-m2a-nofpu,) + $(call cc-option,-m2a-nofpu,) \ + $(call cc-option,-m4-nofpu,) cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ $(call 
cc-option,-mno-implicit-fp,-m4-nofpu) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 29f2e988c56a..407c87d9879a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -78,6 +78,7 @@ config SPARC64 select HAVE_C_RECORDMCOUNT select NO_BOOTMEM select HAVE_ARCH_AUDITSYSCALL + select ARCH_SUPPORTS_ATOMIC_RMW config ARCH_DEFCONFIG string diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index b73274fb961a..42f2bca1d338 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h @@ -410,8 +410,9 @@ #define __NR_finit_module 342 #define __NR_sched_setattr 343 #define __NR_sched_getattr 344 +#define __NR_renameat2 345 -#define NR_syscalls 345 +#define NR_syscalls 346 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index d066eb18650c..f834224208ed 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1) SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) +SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2) .globl sys32_mmap2 sys32_mmap2: diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 151ace8766cc..85fe9b1087cd 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -86,3 +86,4 @@ sys_call_table: /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr +/*345*/ .long sys_renameat2 diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 4bd4e2bb26cf..33ecba2826ea 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -87,6 +87,7 @@ sys_call_table32: /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr + .word sys32_renameat2 #endif /* CONFIG_COMPAT */ @@ -165,3 +166,4 @@ sys_call_table: /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr + .word sys_renameat2 diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 9472079471bb..f1b3eb14b855 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -12,6 +12,7 @@ #include <mem_user.h> #include <os.h> #include <skas.h> +#include <kern_util.h> struct host_vm_change { struct host_vm_op { @@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *last; int ret = 0; + if ((addr >= STUB_START) && (addr < STUB_END)) + return -EINVAL; + if (hvc->index != 0) { last = &hvc->ops[hvc->index - 1]; if ((last->type == MUNMAP) && @@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, /* This is not an else because ret is modified 
above */ if (ret) { printk(KERN_ERR "fix_range_common: failed, killing current " - "process\n"); + "process: %d\n", task_tgid_vnr(current)); + /* We are under mmap_sem, release it such that current can terminate */ + up_write(&current->mm->mmap_sem); force_sig(SIGKILL, current); + do_signal(); } } diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 974b87474a99..5678c3571e7c 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, int is_write = FAULT_WRITE(fi); unsigned long address = FAULT_ADDRESS(fi); - if (regs) + if (!is_user && regs) current->thread.segv_regs = container_of(regs, struct pt_regs, regs); if (!is_user && (address >= start_vm) && (address < end_vm)) { diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index d531879a4617..908579f2b0ab 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid) void wait_stub_done(int pid) { - int n, status, err, bad_stop = 0; + int n, status, err; while (1) { CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); @@ -74,8 +74,6 @@ void wait_stub_done(int pid) if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) return; - else - bad_stop = 1; bad_wait: err = ptrace_dump_regs(pid); @@ -85,10 +83,7 @@ bad_wait: printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status); - if (bad_stop) - kill(pid, SIGKILL); - else - fatal_sigsegv(); + fatal_sigsegv(); } extern unsigned long current_stub_stack(void); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a8f749ef0fdc..d24887b645dc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -131,6 +131,7 @@ config X86 select HAVE_CC_STACKPROTECTOR select GENERIC_CPU_AUTOPROBE select HAVE_ARCH_AUDITSYSCALL + select ARCH_SUPPORTS_ATOMIC_RMW config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 84c223479e3c..7a6d43a554d7 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -91,10 +91,9 @@ bs_die: .section ".bsdata", "a" bugger_off_msg: - .ascii "Direct floppy boot is not supported. " - .ascii "Use a boot loader program instead.\r\n" + .ascii "Use a boot loader.\r\n" .ascii "\n" - .ascii "Remove disk and press any key to reboot ...\r\n" + .ascii "Remove disk and press any key to reboot...\r\n" .byte 0 #ifdef CONFIG_EFI_STUB @@ -108,7 +107,7 @@ coff_header: #else .word 0x8664 # x86-64 #endif - .word 3 # nr_sections + .word 4 # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols @@ -250,6 +249,25 @@ section_table: .word 0 # NumberOfLineNumbers .long 0x60500020 # Characteristics (section flags) + # + # The offset & size fields are filled in by build.c. + # + .ascii ".bss" + .byte 0 + .byte 0 + .byte 0 + .byte 0 + .long 0 + .long 0x0 + .long 0 # Size of initialized data + # on disk + .long 0x0 + .long 0 # PointerToRelocations + .long 0 # PointerToLineNumbers + .word 0 # NumberOfRelocations + .word 0 # NumberOfLineNumbers + .long 0xc8000080 # Characteristics (section flags) + #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup.
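The header.S change above adds a fourth PE/COFF section entry for .bss; the build.c change that follows fills in its fields and the related image-size headers. As a minimal standalone restatement of that patching logic (the get/put helpers here are local stand-ins for the kernel's get/put_unaligned_le32, and 0x3c, 0x24 and 0x50 are the standard PE/COFF e_lfanew, SizeOfUninitializedData and SizeOfImage offsets the diff itself uses):

/*
 * Sketch of update_pecoff_bss() from the build.c hunk below, made
 * self-contained for illustration.
 */
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void example_update_pecoff_bss(uint8_t *buf, uint32_t file_sz,
				      uint32_t init_sz)
{
	uint32_t pe_header = get_le32(&buf[0x3c]);	/* e_lfanew */
	uint32_t bss_sz = init_sz - file_sz;

	/* Size of uninitialized data */
	put_le32(&buf[pe_header + 0x24], bss_sz);

	/* Size of image */
	put_le32(&buf[pe_header + 0x50], init_sz);
}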
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index 1a2f2121cada..a7661c430cd9 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -143,7 +143,7 @@ static void usage(void) #ifdef CONFIG_EFI_STUB -static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) +static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset) { unsigned int pe_header; unsigned short num_sections; @@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz put_unaligned_le32(size, section + 0x8); /* section header vma field */ - put_unaligned_le32(offset, section + 0xc); + put_unaligned_le32(vma, section + 0xc); /* section header 'size of initialised data' field */ - put_unaligned_le32(size, section + 0x10); + put_unaligned_le32(datasz, section + 0x10); /* section header 'file offset' field */ put_unaligned_le32(offset, section + 0x14); @@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz } } +static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) +{ + update_pecoff_section_header_fields(section_name, offset, size, size, offset); +} + static void update_pecoff_setup_and_reloc(unsigned int size) { u32 setup_offset = 0x200; @@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) pe_header = get_unaligned_le32(&buf[0x3c]); - /* Size of image */ - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); - /* * Size of code: Subtract the size of the first sector (512 bytes) * which includes the header. @@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) update_pecoff_section_header(".text", text_start, text_sz); } +static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz) +{ + unsigned int pe_header; + unsigned int bss_sz = init_sz - file_sz; + + pe_header = get_unaligned_le32(&buf[0x3c]); + + /* Size of uninitialized data */ + put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]); + + /* Size of image */ + put_unaligned_le32(init_sz, &buf[pe_header + 0x50]); + + update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0); +} + static int reserve_pecoff_reloc_section(int c) { /* Reserve 0x20 bytes for .reloc section */ @@ -259,6 +277,8 @@ static void efi_stub_entry_update(void) static inline void update_pecoff_setup_and_reloc(unsigned int size) {} static inline void update_pecoff_text(unsigned int text_start, unsigned int file_sz) {} +static inline void update_pecoff_bss(unsigned int file_sz, + unsigned int init_sz) {} static inline void efi_stub_defaults(void) {} static inline void efi_stub_entry_update(void) {} @@ -310,7 +330,7 @@ static void parse_zoffset(char *fname) int main(int argc, char ** argv) { - unsigned int i, sz, setup_sectors; + unsigned int i, sz, setup_sectors, init_sz; int c; u32 sys_size; struct stat sb; @@ -376,7 +396,9 @@ int main(int argc, char ** argv) buf[0x1f1] = setup_sectors-1; put_unaligned_le32(sys_size, &buf[0x1f4]); - update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); + update_pecoff_text(setup_sectors * 512, i + (sys_size * 16)); + init_sz = get_unaligned_le32(&buf[0x260]); + update_pecoff_bss(i + (sys_size * 16), init_sz); efi_stub_entry_update(); diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index f3a1f04ed4cb..584874451414 100644 ---
b/arch/x86/kernel/apm_32.c @@ -841,7 +841,6 @@ static int apm_do_idle(void) u32 eax; u8 ret = 0; int idled = 0; - int polling; int err = 0; if (!need_resched()) { diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index a80029035bf2..f9e4fdd3b877 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c) */ detect_extended_topology(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + /* + * let's use the legacy cpuid vector 0x1 and 0x4 for topology + * detection. + */ + c->x86_max_cores = intel_num_cpu_cores(c); +#ifdef CONFIG_X86_32 + detect_ht(c); +#endif + } + l2 = init_intel_cacheinfo(c); if (c->cpuid_level > 9) { unsigned eax = cpuid_eax(10); @@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_P3); #endif - if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { - /* - * let's use the legacy cpuid vector 0x1 and 0x4 for topology - * detection. - */ - c->x86_max_cores = intel_num_cpu_cores(c); -#ifdef CONFIG_X86_32 - detect_ht(c); -#endif - } - /* Work around errata */ srat_detect_node(c); diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index a952e9c85b6f..9c8f7394c612 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) #endif } +#ifdef CONFIG_X86_HT + /* + * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in + * turn means that the only possibility is SMT (as indicated in + * cpuid1). Since cpuid2 doesn't specify shared caches, and we know + * that SMT shares all caches, we can unconditionally set cpu_llc_id to + * c->phys_proc_id. + */ + if (per_cpu(cpu_llc_id, cpu) == BAD_APICID) + per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; +#endif + c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); return l2; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index bb92f38153b2..9a79c8dbd8e8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void) for_each_online_cpu(i) { err = mce_device_create(i); if (err) { + /* + * Register notifier anyway (and do not unreg it) so + * that we don't leave undeleted timers, see notifier + * callback above.
+ */ + __register_hotcpu_notifier(&mce_cpu_notifier); cpu_notifier_register_done(); goto err_device_create; } @@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void) err_register: unregister_syscore_ops(&mce_syscore_ops); - cpu_notifier_register_begin(); - __unregister_hotcpu_notifier(&mce_cpu_notifier); - cpu_notifier_register_done(); - err_device_create: /* * We didn't keep track of which devices were created above, but diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2bdfbff8a4f6..2879ecdaac43 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; + /* Check if the extra MSRs can be safely accessed */ + if (!er->extra_msr_access) + return -ENXIO; reg->idx = er->idx; reg->config = event->attr.config1; diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 3b2f9bdd974b..8ade93111e03 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -295,14 +295,16 @@ struct extra_reg { u64 config_mask; u64 valid_mask; int idx; /* per_xxx->regs[] reg index */ + bool extra_msr_access; }; #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ - .event = (e), \ - .msr = (ms), \ - .config_mask = (m), \ - .valid_mask = (vm), \ - .idx = EXTRA_REG_##i, \ + .event = (e), \ + .msr = (ms), \ + .config_mask = (m), \ + .valid_mask = (vm), \ + .idx = EXTRA_REG_##i, \ + .extra_msr_access = true, \ } #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index adb02aa62af5..2502d0d9d246 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1382,6 +1382,15 @@ again: intel_pmu_lbr_read(); /* + * CondChgd bit 63 doesn't indicate any overflow status. Ignore + * and clear the bit. + */ + if (__test_and_clear_bit(63, (unsigned long *)&status)) { + if (!status) + goto done; + } + + /* * PEBS overflow sets bit 62 in the global status register */ if (__test_and_clear_bit(62, (unsigned long *)&status)) { @@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void) } } +/* + * Under certain circumstances, accessing certain MSRs may cause a #GP. + * This function tests whether the input MSR can be safely accessed. + */ +static bool check_msr(unsigned long msr, u64 mask) +{ + u64 val_old, val_new, val_tmp; + + /* + * Read the current value, change it and read it back to see if it + * matches, this is needed to detect certain hardware emulators + * (qemu/kvm) that don't trap on the MSR access and always return 0s. + */ + if (rdmsrl_safe(msr, &val_old)) + return false; + + /* + * Only change the bits which can be updated by wrmsrl. + */ + val_tmp = val_old ^ mask; + if (wrmsrl_safe(msr, val_tmp) || + rdmsrl_safe(msr, &val_new)) + return false; + + if (val_new != val_tmp) + return false; + + /* At this point it is certain that the MSR can be safely accessed. + * Restore the old value and return.
+ */ + wrmsrl(msr, val_old); + + return true; +} + static __init void intel_sandybridge_quirk(void) { x86_pmu.check_microcode = intel_snb_check_microcode; @@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void) union cpuid10_ebx ebx; struct event_constraint *c; unsigned int unused; - int version; + struct extra_reg *er; + int version, i; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void) case 62: /* IvyBridge EP */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + /* dTLB-load-misses on IVB is different from SNB */ + hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void) } } + /* + * Accessing the LBR MSRs may cause a #GP under certain circumstances, + * e.g. KVM doesn't support the LBR MSRs. + * Check all LBR MSRs here. + * Disable LBR access if any LBR MSRs cannot be accessed. + */ + if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL)) + x86_pmu.lbr_nr = 0; + for (i = 0; i < x86_pmu.lbr_nr; i++) { + if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && + check_msr(x86_pmu.lbr_to + i, 0xffffUL))) + x86_pmu.lbr_nr = 0; + } + + /* + * Accessing an extra MSR may cause a #GP under certain circumstances, + * e.g. KVM doesn't support the offcore events. + * Check all extra_regs here. + */ + if (x86_pmu.extra_regs) { + for (er = x86_pmu.extra_regs; er->msr; er++) { + er->extra_msr_access = check_msr(er->msr, 0x1ffUL); + /* Disable LBR select mapping */ + if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) + x86_pmu.lbr_sel_map = NULL; + } + } + /* Support full width counters using alternative MSR range */ if (x86_pmu.intel_cap.full_width_write) { x86_pmu.max_period = x86_pmu.cntval_mask; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 980970cb744d..696ade311ded 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu) if (!x86_pmu.bts) return 0; - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); - if (unlikely(!buffer)) + buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); + if (unlikely(!buffer)) { + WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); return -ENOMEM; + } max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; thresh = max / 16; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 65bbbea38b9c..ae6552a0701f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), - SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), -
SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), - SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), + SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), + SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), @@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, SNBEP_CBO_PMON_CTL_TID_EN, 0x1), SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), @@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), - SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), + SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index dbaa23e78b36..0d0c9d4ab6d5 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -425,8 +425,8 @@ sysenter_do_call: cmpl $(NR_syscalls), %eax jae sysenter_badsys call *sys_call_table(,%eax,4) - movl %eax,PT_EAX(%esp) sysenter_after_call: + movl %eax,PT_EAX(%esp) LOCKDEP_SYS_EXIT DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF @@ -502,6 +502,7 @@ ENTRY(system_call) jae syscall_badsys syscall_call: call *sys_call_table(,%eax,4) +syscall_after_call: movl %eax,PT_EAX(%esp) # store the return value syscall_exit: LOCKDEP_SYS_EXIT @@ -675,12 +676,12 @@ syscall_fault: END(syscall_fault) syscall_badsys: - movl $-ENOSYS,PT_EAX(%esp) - jmp syscall_exit + movl $-ENOSYS,%eax + jmp syscall_after_call END(syscall_badsys) sysenter_badsys: - movl $-ENOSYS,PT_EAX(%esp) + movl $-ENOSYS,%eax jmp sysenter_after_call END(syscall_badsys) CFI_ENDPROC diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 6afbb16e9b79..94d857fb1033 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -175,7 +175,7 @@ void init_espfix_ap(void) if (!pud_present(pud)) { pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); - paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); + paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PUD_CLONES; n++) set_pud(&pud_p[n], pud); } @@ -185,7 +185,7 @@ void init_espfix_ap(void) if (!pmd_present(pmd)) { pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); - paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT); + paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PMD_CLONES; n++) set_pmd(&pmd_p[n], pmd); } @@ -193,7 +193,6 @@ void init_espfix_ap(void) pte_p = pte_offset_kernel(&pmd, addr); stack_page = (void *)__get_free_page(GFP_KERNEL); pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); - paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT); for (n = 0; n < ESPFIX_PTE_CLONES; n++) set_pte(&pte_p[n*PTE_STRIDE], pte); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 7596df664901..67e6d19ef1be 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -574,6 +574,9 @@ int 
kprobe_int3_handler(struct pt_regs *regs) struct kprobe *p; struct kprobe_ctlblk *kcb; + if (user_mode_vm(regs)) + return 0; + addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); /* * We don't want to be preempted for the entire diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 57e5ce126d5a..ea030319b321 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) mark_tsc_unstable("cpufreq changes"); - } - set_cyc2ns_scale(tsc_khz, freq->cpu); + set_cyc2ns_scale(tsc_khz, freq->cpu); + } return 0; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f6449334ec45..ef432f891d30 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { + /* + * Because interrupts can be injected asynchronously, we are + * calling check_nested_events again here to avoid a race condition. + * See https://lkml.org/lkml/2014/7/2/60 for discussion about this + * proposal and current concerns. Perhaps we should be setting + * KVM_REQ_EVENT only on certain events and not unconditionally? + */ + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { + r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + if (r != 0) + return r; + } if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index f9e1ec346e35..8453e6e39895 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow: beqz a2, 1f # if at start of vector, don't restore addi a0, a0, -128 - bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 - bbsi a0, 7, 2f + bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 + + /* + * This fixup handler is for the extremely unlikely case where the + * overflow handler's reference thru a0 gets a hardware TLB refill + * that bumps out the (distinct, aliasing) TLB entry that mapped its + * prior references thru a9/a13, and where our reference now thru + * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). + */ + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + + bbsi.l a0, 7, 2f /* * Restore a0 as saved by _WindowOverflow8(). - * - * FIXME: we really need a fixup handler for this L32E, - * for the extremely unlikely case where the overflow handler's - * reference thru a0 gets a hardware TLB refill that bumps out - * the (distinct, aliasing) TLB entry that mapped its prior - * references thru a9, and where our reference now thru a9 - * gets a 2nd-level miss exception (not hardware TLB refill). */ - l32e a2, a9, -16 - wsr a2, depc # replace the saved a0 - j 1f + l32e a0, a9, -16 + wsr a0, depc # replace the saved a0 + j 3f 2: /* * Restore a0 as saved by _WindowOverflow12(). 
- * - * FIXME: we really need a fixup handler for this L32E, - * for the extremely unlikely case where the overflow handler's - * reference thru a0 gets a hardware TLB refill that bumps out - * the (distinct, aliasing) TLB entry that mapped its prior - * references thru a13, and where our reference now thru a13 - * gets a 2nd-level miss exception (not hardware TLB refill). */ - l32e a2, a13, -16 - wsr a2, depc # replace the saved a0 + l32e a0, a13, -16 + wsr a0, depc # replace the saved a0 +3: + xsr a3, excsave1 + movi a0, 0 + s32i a0, a3, EXC_TABLE_FIXUP + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE 1: /* * Restore WindowBase while leaving all address registers restored. @@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow: s32i a0, a2, PT_DEPC +_DoubleExceptionVector_handle_exception: addx4 a0, a0, a3 l32i a0, a0, EXC_TABLE_FAST_USER xsr a3, excsave1 @@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow: rotw -3 j 1b - .end literal_prefix ENDPROC(_DoubleExceptionVector) /* + * Fixup handler for TLB miss in double exception handler for window overflow. + * We get here with windowbase set to the window that was being spilled and + * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 + * (bit set) window. + * + * We do the following here: + * - go to the original window retaining a0 value; + * - set up exception stack to return back to appropriate a0 restore code + * (we'll need to rotate window back and there's no place to save this + * information, use different return address for that); + * - handle the exception; + * - go to the window that was being spilled; + * - set up window_overflow_restore_a0_fixup as a fixup routine; + * - reload a0; + * - restore the original window; + * - reset the default fixup routine; + * - return to user. By the time we get to this fixup handler all information + * about the conditions of the original double exception that happened in + * the window overflow handler is lost, so we just return to userspace to + * retry overflow from start. + * + * a0: value of depc, original value in depc + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE + * a3: exctable, original value in excsave1 + */ + +ENTRY(window_overflow_restore_a0_fixup) + + rsr a0, ps + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH + rsr a2, windowbase + sub a0, a2, a0 + extui a0, a0, 0, 3 + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + + _beqi a0, 1, .Lhandle_1 + _beqi a0, 3, .Lhandle_3 + + .macro overflow_fixup_handle_exception_pane n + + rsr a0, depc + rotw -\n + + xsr a3, excsave1 + wsr a2, depc + l32i a2, a3, EXC_TABLE_KSTK + s32i a0, a2, PT_AREG0 + + movi a0, .Lrestore_\n + s32i a0, a2, PT_DEPC + rsr a0, exccause + j _DoubleExceptionVector_handle_exception + + .endm + + overflow_fixup_handle_exception_pane 2 +.Lhandle_1: + overflow_fixup_handle_exception_pane 1 +.Lhandle_3: + overflow_fixup_handle_exception_pane 3 + + .macro overflow_fixup_restore_a0_pane n + + rotw \n + /* Need to preserve a0 value here to be able to handle exception + * that may occur on a0 reload from stack. It may occur because + * TLB miss handler may not be atomic and pointer to page table + * may be lost before we get here. There are no free registers, + * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+ */ + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + bbsi.l a0, 7, 1f + l32e a0, a9, -16 + j 2f +1: + l32e a0, a13, -16 +2: + rotw -\n + + .endm + +.Lrestore_2: + overflow_fixup_restore_a0_pane 2 + +.Lset_default_fixup: + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, 0 + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + rfe + +.Lrestore_1: + overflow_fixup_restore_a0_pane 1 + j .Lset_default_fixup +.Lrestore_3: + overflow_fixup_restore_a0_pane 3 + j .Lset_default_fixup + +ENDPROC(window_overflow_restore_a0_fixup) + + .end literal_prefix +/* * Debug interrupt vector * * There is not much space here, so simply jump to another handler. diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index ee32c0085dff..d16db6df86f8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -269,13 +269,13 @@ SECTIONS .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 16, + DOUBLEEXC_VECTOR_VADDR - 40, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 32, + 40, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 4224256bb215..77ed20209ca5 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) return -EINVAL; } - if (it && start - it->start < bank_sz) { + if (it && start - it->start <= bank_sz) { if (start == it->start) { if (end - it->start < bank_sz) { it->start = end; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b9f4cc494ece..28d227c5ca77 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q) { lockdep_assert_held(q->queue_lock); + /* + * @q could be exiting and already have destroyed all blkgs as + * indicated by NULL root_blkg. If so, don't confuse policies. + */ + if (!q->root_blkg) + return; + blk_throtl_drain(q); } diff --git a/block/blk-tag.c b/block/blk-tag.c index 3f33d8672268..a185b86741e5 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag) EXPORT_SYMBOL(blk_queue_find_tag); /** - * __blk_free_tags - release a given set of tag maintenance info + * blk_free_tags - release a given set of tag maintenance info * @bqt: the tag map to free * - * Tries to free the specified @bqt. Returns true if it was - * actually freed and false if there are still references using it + * Drop the reference count on @bqt and free it when the last reference + * is dropped.
*/ -static int __blk_free_tags(struct blk_queue_tag *bqt) +void blk_free_tags(struct blk_queue_tag *bqt) { - int retval; - - retval = atomic_dec_and_test(&bqt->refcnt); - if (retval) { + if (atomic_dec_and_test(&bqt->refcnt)) { BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < bqt->max_depth); @@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) kfree(bqt); } - - return retval; } +EXPORT_SYMBOL(blk_free_tags); /** * __blk_queue_free_tags - release tag maintenance info @@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q) if (!bqt) return; - __blk_free_tags(bqt); + blk_free_tags(bqt); q->queue_tags = NULL; queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); } /** - * blk_free_tags - release a given set of tag maintenance info - * @bqt: the tag map to free - * - * For externally managed @bqt frees the map. Callers of this - * function must guarantee to have released all the queues that - * might have been using this tag map. - */ -void blk_free_tags(struct blk_queue_tag *bqt) -{ - if (unlikely(!__blk_free_tags(bqt))) - BUG(); -} -EXPORT_SYMBOL(blk_free_tags); - -/** * blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device * diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index fbd5a67cb773..a0926a6094b2 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKROSET: case BLKDISCARD: case BLKSECDISCARD: + case BLKZEROOUT: /* * the ones below are implemented in blkdev_locked_ioctl, * but we call blkdev_ioctl, which gets the lock for us diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 071c1dfb93f3..350d52a8f781 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot"); MODULE_DESCRIPTION("ACPI Video Driver"); MODULE_LICENSE("GPL"); -static bool brightness_switch_enabled; +static bool brightness_switch_enabled = 1; module_param(brightness_switch_enabled, bool, 0644); /* @@ -581,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, + .ident = "HP ProBook 4540s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"), + }, + }, + { + .callback = video_set_use_native_backlight, .ident = "HP ProBook 2013 models", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index dae5607e1115..4cd52a4541a9 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ /* Asmedia */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 18d97d5c7d90..677c0c1b03bd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) * ata_qc_new - Request an available ATA command, for queueing * @ap: target port * + * Some ATA host controllers may implement a queue depth which is less + * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond + * the hardware limitation. + * * LOCKING: * None. 
*/ @@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) { struct ata_queued_cmd *qc = NULL; + unsigned int max_queue = ap->host->n_tags; unsigned int i, tag; /* no command while frozen */ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) return NULL; - for (i = 0; i < ATA_MAX_QUEUE; i++) { - tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; + for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { + tag = tag < max_queue ? tag : 0; /* the last tag is reserved for internal command. */ if (tag == ATA_TAG_INTERNAL) @@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, { spin_lock_init(&host->lock); mutex_init(&host->eh_mutex); + host->n_tags = ATA_MAX_QUEUE - 1; host->dev = dev; host->ops = ops; } @@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) { int i, rc; + host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); + /* host must have been started */ if (!(host->flags & ATA_HOST_STARTED)) { dev_err(host->dev, "BUG: trying to register unstarted host\n"); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 6760fc4e85b8..dad83df555c4 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, case ATA_DEV_ATA: if (err & ATA_ICRC) qc->err_mask |= AC_ERR_ATA_BUS; - if (err & ATA_UNC) + if (err & (ATA_UNC | ATA_AMNF)) qc->err_mask |= AC_ERR_MEDIA; if (err & ATA_IDNF) qc->err_mask |= AC_ERR_INVALID; @@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link) } if (cmd->command != ATA_CMD_PACKET && - (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | - ATA_ABORTED))) - ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", + (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | + ATA_IDNF | ATA_ABORTED))) + ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", res->feature & ATA_ICRC ? "ICRC " : "", res->feature & ATA_UNC ? "UNC " : "", + res->feature & ATA_AMNF ? "AMNF " : "", res->feature & ATA_IDNF ? "IDNF " : "", res->feature & ATA_ABORTED ? 
"ABRT " : ""); #endif diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index 6ad5c072ce34..4d37c5415fc7 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev) struct ep93xx_pata_data *drv_data; struct ata_host *host; struct ata_port *ap; - unsigned int irq; + int irq; struct resource *mem_res; void __iomem *ide_base; int err; diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e9227e1762d..eee48c49f5de 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) return dev->archdata.irqs[num]; #else struct resource *r; - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) - return of_irq_get(dev->dev.of_node, num); + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get(dev->dev.of_node, num); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } r = platform_get_resource(dev, IORESOURCE_IRQ, num); @@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name) { struct resource *r; - if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) - return of_irq_get_byname(dev->dev.of_node, name); + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get_byname(dev->dev.of_node, name); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? r->start : -ENXIO; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1b35c45c92b7..3f2e16738080 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection) struct task_struct *opa; kref_get(&connection->kref); + /* We may just have force_sig()'ed this thread + * to get it out of some blocking network function. + * Clear signals; otherwise kthread_run(), which internally uses + * wait_on_completion_killable(), will mistake our pending signal + * for a new fatal signal and fail. */ + flush_signals(current); opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); if (IS_ERR(opa)) { drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 089e72cd37be..36e54be402df 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity) memset(&zram->stats, 0, sizeof(zram->stats)); zram->disksize = 0; - if (reset_capacity) { + if (reset_capacity) set_capacity(zram->disk, 0); - revalidate_disk(zram->disk); - } + up_write(&zram->init_lock); + + /* + * Revalidate disk out of the init_lock to avoid lockdep splat. + * It's okay because disk's capacity is protected by init_lock + * so that revalidate_disk always sees up-to-date capacity. + */ + if (reset_capacity) + revalidate_disk(zram->disk); } static ssize_t disksize_store(struct device *dev, @@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev, zram->comp = comp; zram->disksize = disksize; set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); - revalidate_disk(zram->disk); up_write(&zram->init_lock); + + /* + * Revalidate disk out of the init_lock to avoid lockdep splat. 
+ * It's okay because disk's capacity is protected by init_lock + * so that revalidate_disk always sees up-to-date capacity. + */ + revalidate_disk(zram->disk); + return len; out_destroy_comp: diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index f98380648cb3..f50dffc0374f 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, - { USB_DEVICE(0x0CF3, 0x3005) }, { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x311E) }, @@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a1c80b0c7663..6250fc2fb93a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 04680ead9275..fede8ca7147c 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { BT_ERR("Non-link packet received in non-active state"); h5_reset_rx(h5); + return 0; } h5->rx_func = h5_rx_payload; diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 334601cc81cf..c4419ea1ab07 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex); static int data_avail; static u8 *rng_buffer; +static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, + int wait); + static size_t rng_buffer_size(void) { return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; } +static void add_early_randomness(struct hwrng *rng) +{ + unsigned char bytes[16]; + int bytes_read; + + /* + * Currently only virtio-rng cannot return data during device + * probe, and that's handled in virtio-rng.c itself. If there + * are more such devices, this call to rng_get_data can be + * made conditional here instead of doing it per-device. 
+ */ + bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); + if (bytes_read > 0) + add_device_randomness(bytes, bytes_read); +} + static inline int hwrng_init(struct hwrng *rng) { - if (!rng->init) - return 0; - return rng->init(rng); + if (rng->init) { + int ret; + + ret = rng->init(rng); + if (ret) + return ret; + } + add_early_randomness(rng); + return 0; } static inline void hwrng_cleanup(struct hwrng *rng) @@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *old_rng, *tmp; - unsigned char bytes[16]; - int bytes_read; if (rng->name == NULL || (rng->data_read == NULL && rng->read == NULL)) @@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng) INIT_LIST_HEAD(&rng->list); list_add_tail(&rng->list, &rng_list); - bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); - if (bytes_read > 0) - add_device_randomness(bytes, bytes_read); + if (old_rng && !rng->init) { + /* + * Use a new device's input to add some randomness to + * the system. If this rng device isn't going to be + * used right away, its init function hasn't been + * called yet; so only use the randomness from devices + * that don't need an init callback. + */ + add_early_randomness(rng); + } + out_unlock: mutex_unlock(&rng_mutex); out: diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index f3e71501de54..e9b15bc18b4d 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -38,6 +38,8 @@ struct virtrng_info { int index; }; +static bool probe_done; + static void random_recv_done(struct virtqueue *vq) { struct virtrng_info *vi = vq->vdev->priv; @@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) int ret; struct virtrng_info *vi = (struct virtrng_info *)rng->priv; + /* + * Don't ask host for data till we're setup. This call can + * happen during hwrng_register(), after commit d9e7972619. 
+ */ + if (unlikely(!probe_done)) + return 0; + if (!vi->busy) { vi->busy = true; init_completion(&vi->have_data); @@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev) return err; } + probe_done = true; return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 0a7ac0a7b252..71529e196b84 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -641,7 +641,7 @@ retry: } while (unlikely(entropy_count < pool_size-2 && pnfrac)); } - if (entropy_count < 0) { + if (unlikely(entropy_count < 0)) { pr_warn("random: negative entropy/overflow: pool %s count %d\n", r->name, entropy_count); WARN_ON(1); @@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, int reserved) { int entropy_count, orig; - size_t ibytes; + size_t ibytes, nfrac; BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); @@ -999,7 +999,17 @@ retry: } if (ibytes < min) ibytes = 0; - if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) + + if (unlikely(entropy_count < 0)) { + pr_warn("random: negative entropy count: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); + entropy_count = 0; + } + nfrac = ibytes << (ENTROPY_SHIFT + 3); + if ((size_t) entropy_count > nfrac) + entropy_count -= nfrac; + else entropy_count = 0; if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) @@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) "with %d bits of entropy available\n", current->comm, nonblocking_pool.entropy_total); + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index ebac67115009..7364a538e056 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ tristate "Freescale i.MX6 cpufreq support" depends on ARCH_MXC depends on REGULATOR_ANATOP + select PM_OPP help This adds cpufreq driver support for Freescale i.MX6 series SoCs. @@ -118,7 +119,7 @@ config ARM_INTEGRATOR If in doubt, say Y. config ARM_KIRKWOOD_CPUFREQ - def_bool MACH_KIRKWOOD + def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD help This adds the CPUFreq driver for Marvell Kirkwood SoCs. diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index ee1ae303a07c..86beda9f950b 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_put_reg; } - ret = of_init_opp_table(cpu_dev); - if (ret) { - pr_err("failed to init OPP table: %d\n", ret); - goto out_put_clk; - } + /* OPPs might be populated at runtime, don't check for error here */ + of_init_opp_table(cpu_dev); ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 62259d27f03e..6f024852c6fb 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) * the creation of a brand new one. So we need to perform this update * by invoking update_policy_cpu(). 
*/ - if (recover_policy && cpu != policy->cpu) + if (recover_policy && cpu != policy->cpu) { update_policy_cpu(policy, cpu); - else + WARN_ON(kobject_move(&policy->kobj, &dev->kobj)); + } else { policy->cpu = cpu; + } cpumask_copy(policy->cpus, cpumask_of(cpu)); diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 546376719d8f..b5befc211172 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c @@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void) name = "K4S641632D"; if (machine_is_h3100()) name = "KM416S4030CT"; - if (machine_is_jornada720()) + if (machine_is_jornada720() || machine_is_h3600()) name = "K4S281632B-1H"; if (machine_is_nanoengine()) name = "MT48LC8M16A2TG-75"; diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 4199849e3758..145974f9662b 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -1,4 +1,5 @@ menu "IEEE 1394 (FireWire) support" + depends on HAS_DMA depends on PCI || COMPILE_TEST # firewire-core does not depend on PCI but is # not useful without PCI controller driver diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 57985410f12f..a66a3217f1d9 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -336,10 +336,10 @@ static const struct { QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, - QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, + QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI}, {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, - 0}, + QUIRK_NO_MSI}, {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index eff1a2f22f09..dc79346689e6 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -346,6 +346,7 @@ static __initdata struct { struct param_info { int verbose; + int found; void *params; }; @@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; - pr_info("Getting parameters from FDT:\n"); - for (i = 0; i < ARRAY_SIZE(dt_params); i++) { prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); - if (!prop) { - pr_err("Can't find %s in device tree!\n", - dt_params[i].name); + if (!prop) return 0; - } dest = info->params + dt_params[i].offset; + info->found++; val = of_read_number(prop, len / sizeof(u32)); @@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname, int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) { struct param_info info; + int ret; + + pr_info("Getting EFI parameters from FDT:\n"); info.verbose = verbose; + info.found = 0; info.params = params; - return of_scan_flat_dt(fdt_find_uefi_params, &info); + ret = of_scan_flat_dt(fdt_find_uefi_params, &info); + if (!info.found) + pr_info("UEFI not found.\n"); + else if (!ret) + pr_err("Can't find '%s' in device tree!\n", + dt_params[info.found].name); + + return ret; } #endif /* CONFIG_EFI_PARAMS_FROM_FDT */ diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c index 82d774161cc9..507a3df46a5d 100644 --- a/drivers/firmware/efi/fdt.c +++ b/drivers/firmware/efi/fdt.c @@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, u32 fdt_val32; u64 fdt_val64; - /* - * Copy definition of linux_banner here. 
Since this code is - * built as part of the decompressor for ARM v7, pulling - * in version.c where linux_banner is defined for the - * kernel brings other kernel dependencies with it. - */ - const char linux_banner[] = - "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" - LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; - /* Do some checks on provided FDT, if it exists*/ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index fe7c0e211f9a..57adbc90fdad 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi) if (spi_present_mask & (1 << addr)) chips++; } - if (!chips) - return -ENODEV; } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); @@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi) if (!(spi_present_mask & (1 << addr))) continue; chips--; - if (chips < 0) { - dev_err(&spi->dev, "FATAL: invalid negative chip id\n"); - goto fail; - } data->mcp[addr] = &data->chip[chips]; status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 0x40 | (addr << 1), type, base, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 0c9f803fc1ac..b6ae89ea8811 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq, static struct irq_domain_ops gpio_rcar_irq_domain_ops = { .map = gpio_rcar_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; struct gpio_rcar_info { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f36126383d26..d893e4da5dce 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1616,22 +1616,6 @@ out: return ret; } -void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) -{ - struct i915_vma *vma; - - /* - * Only the global gtt is relevant for gtt memory mappings, so restrict - * list traversal to objects bound into the global address space. Note - * that the active list should be empty, but better safe than sorry. 
- */ - WARN_ON(!list_empty(&dev_priv->gtt.base.active_list)); - list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list) - i915_gem_release_mmap(vma->obj); - list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list) - i915_gem_release_mmap(vma->obj); -} - /** * i915_gem_release_mmap - remove physical page mappings * @obj: obj in question @@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) obj->fault_mappable = false; } +void +i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + i915_gem_release_mmap(obj); +} + uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) { diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 3521f998a178..34894b573064 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -31,7 +31,7 @@ struct i915_render_state { struct drm_i915_gem_object *obj; unsigned long ggtt_offset; - void *batch; + u32 *batch; u32 size; u32 len; }; @@ -80,7 +80,7 @@ free: static void render_state_free(struct i915_render_state *so) { - kunmap(so->batch); + kunmap(kmap_to_page(so->batch)); i915_gem_object_ggtt_unpin(so->obj); drm_gem_object_unreference(&so->obj->base); kfree(so); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 267f069765ad..c05c84f3f091 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring) { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_engine_cs *signaller; - u32 seqno, ctl; + u32 seqno; ring->hangcheck.deadlock++; @@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring) if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) return -1; - /* cursory check for an unkickable deadlock */ - ctl = I915_READ_CTL(signaller); - if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) - return -1; - if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) return 1; - if (signaller->hangcheck.deadlock) + /* cursory check for an unkickable deadlock */ + if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && + semaphore_passed(signaller) < 0) return -1; return 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e27e7804c0b9..f0be855ddf45 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11673,6 +11673,9 @@ static struct intel_quirk intel_quirks[] = { /* Toshiba CB35 Chromebook (Celeron 2955U) */ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, + + /* HP Chromebook 14 (Celeron 2955U) */ + { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, }; static void intel_init_quirks(struct drm_device *dev) @@ -11911,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * ... 
*/ plane = crtc->plane; crtc->plane = !plane; + crtc->primary_enabled = true; dev_priv->display.crtc_disable(&crtc->base); crtc->plane = plane; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 075170d1844f..8a1a4fbc06ac 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -906,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = min_clock; clock <= max_clock; clock++) { + for (clock = min_clock; clock <= max_clock; clock++) { + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 23126023aeba..5e5a72fca5fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= flags; + /* gen2/3 store dither state in pfit control, needs to match */ + if (INTEL_INFO(dev)->gen < 4) { + tmp = I915_READ(PFIT_CONTROL); + + pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; + } + dotclock = pipe_config->port_clock; if (HAS_PCH_SPLIT(dev_priv->dev)) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 628cd8938274..12b02fe1d0ae 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | PFIT_FILTER_FUZZY); - /* Make sure pre-965 set dither correctly for 18bpp panels. */ - if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18) - pfit_control |= PANEL_8TO6_DITHER_ENABLE; - out: if ((pfit_control & PFIT_ENABLE) == 0) { pfit_control = 0; pfit_pgm_ratios = 0; } + /* Make sure pre-965 set dither correctly for 18bpp panels. 
*/ + if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18) + pfit_control |= PANEL_8TO6_DITHER_ENABLE; + pipe_config->gmch_pfit.control = pfit_control; pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; pipe_config->gmch_pfit.lvds_border_bits = border; diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index cfde9eb44ad0..6212537b90c5 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c @@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm) nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, NOUVEAU_THERM_THRS_SHUTDOWN); + spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); + /* schedule the next poll in one second */ if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) - ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); - - spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); + ptimer->alarm(ptimer, 1000000000ULL, alarm); } void diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 34d6a85e9023..0bf1e20c6e44 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg) pending = xchg(&qdev->ram_header->int_pending, 0); + if (!pending) + return IRQ_NONE; + atomic_inc(&qdev->irq_received); if (pending & QXL_INTERRUPT_DISPLAY) { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a03c73411a56..30d242b25078 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); @@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); - /* set pageflip to happen anywhere in vblank interval */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + /* set pageflip to happen only at start of vblank interval (front porch) */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2b2908440644..7d68203a3737 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, struct backlight_properties props; struct radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; - u8 backlight_level; char bl_name[16]; /* Mac laptops with multiple GPUs use the gmux driver for backlight @@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, pdata->encoder = radeon_encoder; - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); - 
dig = radeon_encoder->enc_priv; dig->bl_dev = bd; bd->props.brightness = radeon_atom_backlight_get_brightness(bd); + /* Set a reasonable default here if the level is 0 otherwise + * fbdev will attempt to turn the backlight on after console + * unblanking and it will try and restore 0 which turns the backlight + * off again. + */ + if (bd->props.brightness == 0) + bd->props.brightness = RADEON_MAX_BL_LEVEL; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0b2471107137..c0ea66192fe0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev) gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 8) { @@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f7ece0ff431b..15e4f28015e1 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 333d143fca2c..23bff590fb6e 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -239,7 +239,6 @@ # define EVERGREEN_CRTC_V_BLANK (1 << 0) #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 #define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 -#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c66952d4b00c..3c69f58e46ef 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 29d9cc04c04e..60c47f829122 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -449,6 +449,7 @@ struct radeon_bo_va { /* protected by vm mutex */ struct list_head vm_list; + struct list_head vm_status; /* constant after initialization */ struct radeon_vm *vm; @@ -684,10 +685,9 @@ struct radeon_flip_work { struct work_struct unpin_work; struct radeon_device *rdev; int crtc_id; - struct drm_framebuffer *fb; + uint64_t 
base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; - struct radeon_bo *new_rbo; struct radeon_fence *fence; }; @@ -868,6 +868,9 @@ struct radeon_vm { struct list_head va; unsigned id; + /* BOs freed, but not yet updated in the PT */ + struct list_head freed; + /* contains the page directory */ struct radeon_bo *page_directory; uint64_t pd_gpu_addr; @@ -876,6 +879,8 @@ struct radeon_vm { /* array of page tables, one for each page directory entry */ struct radeon_vm_pt *page_tables; + struct radeon_bo_va *ib_bo_va; + struct mutex mutex; /* last fence for cs using this vm */ struct radeon_fence *fence; @@ -2833,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev, uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); int radeon_vm_update_page_directory(struct radeon_device *rdev, struct radeon_vm *vm); +int radeon_vm_clear_freed(struct radeon_device *rdev, + struct radeon_vm *vm); int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, + struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); @@ -2848,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, struct radeon_bo_va *bo_va, uint64_t offset, uint32_t flags); -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va); +void radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_bo_va *bo_va); /* audio */ void r600_audio_update_hdmi(struct work_struct *work); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 71a143461478..ae763f60c8a0 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, struct radeon_vm *vm) { struct radeon_device *rdev = p->rdev; + struct radeon_bo_va *bo_va; int i, r; r = radeon_vm_update_page_directory(rdev, vm); if (r) return r; - r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, + r = radeon_vm_clear_freed(rdev, vm); + if (r) + return r; + + if (vm->ib_bo_va == NULL) { + DRM_ERROR("Tmp BO not in VM!\n"); + return -EINVAL; + } + + r = radeon_vm_bo_update(rdev, vm->ib_bo_va, &rdev->ring_tmp_bo.bo->tbo.mem); if (r) return r; @@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p, continue; bo = p->relocs[i].robj; - r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); + bo_va = radeon_vm_bo_find(vm, bo); + if (bo_va == NULL) { + dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); + return -EINVAL; + } + + r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); if (r) return r; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 03686fab842d..697add2cd4e3 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev) if (!radeon_check_pot_argument(radeon_vm_size)) { dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } - if (radeon_vm_size < 4) { - dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", + if (radeon_vm_size < 1) { + dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } /* * Max GPUVM size for Cayman, SI and CI are 40 bits.
*/ - if (radeon_vm_size > 1024*1024) { - dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", + if (radeon_vm_size > 1024) { + dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", radeon_vm_size); - radeon_vm_size = 4096; + radeon_vm_size = 4; } /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the * page table and the remaining bits are in the page directory */ if (radeon_vm_block_size < 9) { - dev_warn(rdev->dev, "VM page table size (%d) to small\n", + dev_warn(rdev->dev, "VM page table size (%d) too small\n", radeon_vm_block_size); radeon_vm_block_size = 9; } if (radeon_vm_block_size > 24 || - radeon_vm_size < (1ull << radeon_vm_block_size)) { - dev_warn(rdev->dev, "VM page table size (%d) to large\n", + (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) { + dev_warn(rdev->dev, "VM page table size (%d) too large\n", radeon_vm_block_size); radeon_vm_block_size = 9; } @@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev, /* Adjust VM size here. * Max GPUVM size for cayman+ is 40 bits. */ - rdev->vm_manager.max_pfn = radeon_vm_size << 8; + rdev->vm_manager.max_pfn = radeon_vm_size << 18; /* Set asic functions */ r = radeon_asic_init(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 13896edcf0b6..bf25061c8ac4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); - radeon_fence_unref(&work->fence); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); queue_work(radeon_crtc->flip_queue, &work->unpin_work); } @@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; - struct drm_framebuffer *fb = work->fb; - - uint32_t tiling_flags, pitch_pixels; - uint64_t base; - unsigned long flags; int r; down_read(&rdev->exclusive_lock); - while (work->fence) { + if (work->fence) { r = radeon_fence_wait(work->fence, false); if (r == -EDEADLK) { up_read(&rdev->exclusive_lock); r = radeon_gpu_reset(rdev); down_read(&rdev->exclusive_lock); } + if (r) + DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); - if (r) { - DRM_ERROR("failed to wait on page flip fence (%d)!\n", - r); - goto cleanup; - } else - radeon_fence_unref(&work->fence); + /* We continue with the page flip even if we failed to wait on + * the fence, otherwise the DRM core and userspace will be + * confused about which BO the CRTC is scanning out + */ + + radeon_fence_unref(&work->fence); } + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + /* set the proper interrupt */ + radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + + /* do the flip (mmio) */ + radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); + + radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + up_read(&rdev->exclusive_lock); +} + +static int radeon_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc 
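/*
 * Units check for the radeon_vm_size rework above (a sketch, not driver
 * code): the parameter is now expressed in GiB instead of MiB, and with
 * 4 KiB GPU pages one GiB holds 2^30 / 2^12 = 2^18 page frames -- hence
 * "max_pfn = radeon_vm_size << 18" where the MiB-based code used "<< 8"
 * (2^20 / 2^12).
 */
#include <assert.h>

static unsigned long long vm_max_pfn(unsigned long long size_gib)
{
	return size_gib << 18;	/* 4 KiB page frames per GiB */
}

static void vm_size_selftest(void)
{
	/* the new default of 4 GiB covers 2^20 page-frame entries */
	assert(vm_max_pfn(4) == (4ULL << 30) / 4096);
}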
*radeon_crtc = to_radeon_crtc(crtc); + struct radeon_framebuffer *old_radeon_fb; + struct radeon_framebuffer *new_radeon_fb; + struct drm_gem_object *obj; + struct radeon_flip_work *work; + struct radeon_bo *new_rbo; + uint32_t tiling_flags, pitch_pixels; + uint64_t base; + unsigned long flags; + int r; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + INIT_WORK(&work->flip_work, radeon_flip_work_func); + INIT_WORK(&work->unpin_work, radeon_unpin_work_func); + + work->rdev = rdev; + work->crtc_id = radeon_crtc->crtc_id; + work->event = event; + + /* schedule unpin of the old buffer */ + old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); + obj = old_radeon_fb->obj; + + /* take a reference to the old object */ + drm_gem_object_reference(obj); + work->old_rbo = gem_to_radeon_bo(obj); + + new_radeon_fb = to_radeon_framebuffer(fb); + obj = new_radeon_fb->obj; + new_rbo = gem_to_radeon_bo(obj); + + spin_lock(&new_rbo->tbo.bdev->fence_lock); + if (new_rbo->tbo.sync_obj) + work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); + spin_unlock(&new_rbo->tbo.bdev->fence_lock); + /* pin the new buffer */ - DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", - work->old_rbo, work->new_rbo); + DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n", + work->old_rbo, new_rbo); - r = radeon_bo_reserve(work->new_rbo, false); + r = radeon_bo_reserve(new_rbo, false); if (unlikely(r != 0)) { DRM_ERROR("failed to reserve new rbo buffer before flip\n"); goto cleanup; } /* Only 27 bit offset for legacy CRTC */ - r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM, ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); if (unlikely(r != 0)) { - radeon_bo_unreserve(work->new_rbo); + radeon_bo_unreserve(new_rbo); r = -EINVAL; DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); - radeon_bo_unreserve(work->new_rbo); + radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); + radeon_bo_unreserve(new_rbo); if (!ASIC_IS_AVIVO(rdev)) { /* crtc offset is from display base addr not FB location */ @@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work) } base &= ~7; } + work->base = base; r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); if (r) { @@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - /* set the proper interrupt */ - radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); + if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + r = -EBUSY; + goto vblank_cleanup; + } + radeon_crtc->flip_status = RADEON_FLIP_PENDING; + radeon_crtc->flip_work = work; - /* do the flip (mmio) */ - radeon_page_flip(rdev, radeon_crtc->crtc_id, base); + /* update crtc fb */ + crtc->primary->fb = fb; - radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - up_read(&rdev->exclusive_lock); - return; + queue_work(radeon_crtc->flip_queue, &work->flip_work); + return 0; + +vblank_cleanup: + drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); pflip_cleanup: - if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { + if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to 
reserve new rbo in error path\n"); goto cleanup; } - if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { + if (unlikely(radeon_bo_unpin(new_rbo) != 0)) { DRM_ERROR("failed to unpin new rbo in error path\n"); } - radeon_bo_unreserve(work->new_rbo); + radeon_bo_unreserve(new_rbo); cleanup: drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); radeon_fence_unref(&work->fence); kfree(work); - up_read(&rdev->exclusive_lock); -} - -static int radeon_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct radeon_framebuffer *old_radeon_fb; - struct radeon_framebuffer *new_radeon_fb; - struct drm_gem_object *obj; - struct radeon_flip_work *work; - unsigned long flags; - - work = kzalloc(sizeof *work, GFP_KERNEL); - if (work == NULL) - return -ENOMEM; - - INIT_WORK(&work->flip_work, radeon_flip_work_func); - INIT_WORK(&work->unpin_work, radeon_unpin_work_func); - - work->rdev = rdev; - work->crtc_id = radeon_crtc->crtc_id; - work->fb = fb; - work->event = event; - - /* schedule unpin of the old buffer */ - old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb); - obj = old_radeon_fb->obj; - - /* take a reference to the old object */ - drm_gem_object_reference(obj); - work->old_rbo = gem_to_radeon_bo(obj); - - new_radeon_fb = to_radeon_framebuffer(fb); - obj = new_radeon_fb->obj; - work->new_rbo = gem_to_radeon_bo(obj); - - spin_lock(&work->new_rbo->tbo.bdev->fence_lock); - if (work->new_rbo->tbo.sync_obj) - work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj); - spin_unlock(&work->new_rbo->tbo.bdev->fence_lock); - - /* We borrow the event spin lock for protecting flip_work */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { - DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); - radeon_fence_unref(&work->fence); - kfree(work); - return -EBUSY; - } - radeon_crtc->flip_status = RADEON_FLIP_PENDING; - radeon_crtc->flip_work = work; - - /* update crtc fb */ - crtc->primary->fb = fb; - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - queue_work(radeon_crtc->flip_queue, &work->flip_work); - - return 0; + return r; } static int @@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) struct radeon_device *rdev = dev->dev_private; int ret = 0; + /* don't leak the edid if we already fetched it in detect() */ + if (radeon_connector->edid) + goto got_edid; + /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); @@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } if (radeon_connector->edid) { +got_edid: drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index cb1421369e3a..e9e361084249 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -173,7 +173,7 @@ int 
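/*
 * Shape of the page-flip rework above, as a sketch: pinning the new
 * buffer and reading its tiling flags move out of the worker into
 * radeon_crtc_page_flip() itself, so failures are returned to the
 * ioctl caller instead of surfacing asynchronously, and the worker is
 * left with only the fence wait and the mmio flip.  flip_status
 * advances NONE -> PENDING (ioctl) -> SUBMITTED (worker) under the
 * event spinlock; the names below are illustrative:
 */
#include <errno.h>

enum flip_status { FLIP_NONE, FLIP_PENDING, FLIP_SUBMITTED };

/* ioctl side: refuse to queue a second flip while one is in flight */
static int try_queue_flip(enum flip_status *status)
{
	if (*status != FLIP_NONE)
		return -EBUSY;	/* "flip queue: crtc already busy" */
	*status = FLIP_PENDING;
	return 0;
}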
radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; int radeon_hard_reset = 0; -int radeon_vm_size = 4096; +int radeon_vm_size = 4; int radeon_vm_block_size = 9; int radeon_deep_color = 0; @@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444); MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); module_param_named(hard_reset, radeon_hard_reset, int, 0444); -MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); +MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)"); module_param_named(vm_size, radeon_vm_size, int, 0444); MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 35d931881b4b..d25ae6acfd5a 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { struct radeon_fpriv *fpriv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm; int r; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); @@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return -ENOMEM; } - r = radeon_vm_init(rdev, &fpriv->vm); + vm = &fpriv->vm; + r = radeon_vm_init(rdev, vm); if (r) { kfree(fpriv); return r; @@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (rdev->accel_working) { r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) { - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } /* map the ib pool buffer read only into * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, + RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); if (r) { - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); return r; } @@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev, /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { struct radeon_fpriv *fpriv = file_priv->driver_priv; - struct radeon_bo_va *bo_va; + struct radeon_vm *vm = &fpriv->vm; int r; if (rdev->accel_working) { r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); + if (vm->ib_bo_va) + radeon_vm_bo_rmv(rdev, vm->ib_bo_va); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); } } - radeon_vm_fini(rdev, &fpriv->vm); + radeon_vm_fini(rdev, vm); kfree(fpriv); file_priv->driver_priv = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index eecff6bbd341..725d3669014f 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->vm_list); + INIT_LIST_HEAD(&bo_va->vm_status); mutex_lock(&vm->mutex); list_add(&bo_va->vm_list, &vm->va); @@ -468,6 +469,19 @@ int 
radeon_vm_bo_set_addr(struct radeon_device *rdev, head = &tmp->vm_list; } + if (bo_va->soffset) { + /* add a clone of the bo_va to clear the old address */ + tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); + if (!tmp) { + mutex_unlock(&vm->mutex); + return -ENOMEM; + } + tmp->soffset = bo_va->soffset; + tmp->eoffset = bo_va->eoffset; + tmp->vm = vm; + list_add(&tmp->vm_status, &vm->freed); + } + bo_va->soffset = soffset; bo_va->eoffset = eoffset; bo_va->flags = flags; @@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, * Object have to be reserved and mutex must be locked! */ int radeon_vm_bo_update(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo, + struct radeon_bo_va *bo_va, struct ttm_mem_reg *mem) { + struct radeon_vm *vm = bo_va->vm; struct radeon_ib ib; - struct radeon_bo_va *bo_va; unsigned nptes, ndw; uint64_t addr; int r; - bo_va = radeon_vm_bo_find(vm, bo); - if (bo_va == NULL) { - dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); - return -EINVAL; - } if (!bo_va->soffset) { dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", - bo, vm); + bo_va->bo, vm); return -EINVAL; } @@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, trace_radeon_vm_bo_update(bo_va); - nptes = radeon_bo_ngpu_pages(bo); + nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE; /* padding, etc. */ ndw = 64; @@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev, } /** + * radeon_vm_clear_freed - clear freed BOs in the PT + * + * @rdev: radeon_device pointer + * @vm: requested vm + * + * Make sure all freed BOs are cleared in the PT. + * Returns 0 for success. + * + * PTs have to be reserved and mutex must be locked! + */ +int radeon_vm_clear_freed(struct radeon_device *rdev, + struct radeon_vm *vm) +{ + struct radeon_bo_va *bo_va, *tmp; + int r; + + list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { + list_del(&bo_va->vm_status); + r = radeon_vm_bo_update(rdev, bo_va, NULL); + kfree(bo_va); + if (r) + return r; + } + return 0; + +} + +/** * radeon_vm_bo_rmv - remove a bo to a specific vm * * @rdev: radeon_device pointer * @bo_va: requested bo_va * * Remove @bo_va->bo from the requested vm (cayman+). - * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and - * remove the ptes for @bo_va in the page table. - * Returns 0 for success. * * Object have to be reserved! 
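/*
 * The vm->freed machinery introduced above, reduced to its pattern
 * (illustrative types; the driver uses struct radeon_bo_va): a mapping
 * that goes away -- replaced via radeon_vm_bo_set_addr() or removed by
 * radeon_vm_bo_rmv() -- is not cleared from the page tables
 * synchronously.  A record of the stale range is parked on a per-VM
 * list, and radeon_vm_clear_freed() flushes the list on the next
 * command submission, when the page tables are reserved anyway.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct stale_range {
	struct list_head vm_status;	/* hangs off the VM's freed list */
	u64 soffset, eoffset;		/* PTEs to clear on the next CS */
};

static int park_stale_range(struct list_head *freed, u64 so, u64 eo)
{
	struct stale_range *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	s->soffset = so;
	s->eoffset = eo;
	list_add(&s->vm_status, freed);
	return 0;
}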
*/ -int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va) +void radeon_vm_bo_rmv(struct radeon_device *rdev, + struct radeon_bo_va *bo_va) { - int r = 0; + struct radeon_vm *vm = bo_va->vm; - mutex_lock(&bo_va->vm->mutex); - if (bo_va->soffset) - r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL); + list_del(&bo_va->bo_list); + mutex_lock(&vm->mutex); list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - return r; + if (bo_va->soffset) { + bo_va->bo = NULL; + list_add(&bo_va->vm_status, &vm->freed); + } else { + kfree(bo_va); + } + + mutex_unlock(&vm->mutex); } /** @@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) int r; vm->id = 0; + vm->ib_bo_va = NULL; vm->fence = NULL; vm->last_flush = NULL; vm->last_id_use = NULL; mutex_init(&vm->mutex); INIT_LIST_HEAD(&vm->va); + INIT_LIST_HEAD(&vm->freed); pd_size = radeon_vm_directory_size(rdev); pd_entries = radeon_vm_num_pdes(rdev); @@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) kfree(bo_va); } } - + list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) + kfree(bo_va); for (i = 0; i < radeon_vm_num_pdes(rdev); i++) radeon_bo_unref(&vm->page_tables[i].bo); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 237dd29d9f1c..3e21e869015f 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x3) != 0) { - tmp &= ~0x3; + if ((tmp & 0x7) != 3) { + tmp &= ~0x7; + tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index eba0225259a4..9e854fd016da 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev) tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 20da6ff183df..32e50be9c4ac 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - /* There are stability issues reported on latops with - * bapm installed when switching between AC and battery - * power. At the same time, some desktop boards hang - * if it's not enabled and dpm is enabled. + /* There are stability issues reported with + * bapm enabled when switching between AC and battery + * power. At the same time, some MSI boards hang + * if it's not enabled and dpm is enabled. Just enable + * it for MSI boards right now.
*/ - if (rdev->flags & RADEON_IS_MOBILITY) - pi->enable_bapm = false; - else + if (rdev->pdev->subsystem_vendor == 0x1462) pi->enable_bapm = true; + else + pi->enable_bapm = false; pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index eaaa3d843b80..23b2ce294c4c 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context) /* * Send the information to the user-level daemon. */ - fcopy_send_data(); schedule_delayed_work(&fcopy_work, 5*HZ); + fcopy_send_data(); return; } icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f4dea5ccf17..9ee3913850d6 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_min[attr->index] = temp; @@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_max[attr->index] = temp; @@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev, return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->pwm_tmin[attr->index] = temp; diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index afd31042b452..d14ab3c45daa 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c @@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { - return sprintf(buf, "da9052-hwmon\n"); + return sprintf(buf, "da9052\n"); } static ssize_t show_label(struct device *dev, diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c index 73b3865f1207..35eb7738d711 100644 --- a/drivers/hwmon/da9055-hwmon.c +++ b/drivers/hwmon/da9055-hwmon.c @@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { - return sprintf(buf, "da9055-hwmon\n"); + return sprintf(buf, "da9055\n"); } static ssize_t show_label(struct device *dev, diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c index efee4c59239f..34b9a601ad07 100644 --- a/drivers/hwmon/smsc47m192.c +++ b/drivers/hwmon/smsc47m192.c @@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n) */ static inline s8 TEMP_TO_REG(int val) { - return clamp_val(SCALE(val, 1, 1000), -128000, 127000); + return SCALE(clamp_val(val, -128000, 127000), 1, 1000); } static inline int TEMP_FROM_REG(s8 val) @@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, err = kstrtoul(buf, 10, &val); if (err) return err; + if (val > 255) + return -EINVAL; data->vrm = val; return count; diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 8fb46aab2d87..a04c49f2a011 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -416,6 +416,7 @@ config BLK_DEV_CY82C693 config BLK_DEV_CS5520 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help Include support for PIO tuning and 
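/*
 * Worked example for the hwmon clamp fixes above: these temperature
 * registers are signed 8-bit, so the adt7470 bounds become -128..127
 * and smsc47m192 must clamp in millidegrees *before* scaling.  The old
 * TEMP_TO_REG clamped the already-scaled value against millidegree
 * bounds (a no-op) and then truncated to s8.  Stand-alone sketch, using
 * plain division in place of the kernel's SCALE() macro:
 */
typedef signed char s8;

static s8 temp_to_reg_old(long val)	/* val in millidegrees C */
{
	long scaled = val / 1000;

	if (scaled < -128000) scaled = -128000;	/* never fires */
	if (scaled > 127000)  scaled = 127000;
	return (s8)scaled;	/* 200000 -> 200 -> wraps to -56 */
}

static s8 temp_to_reg_new(long val)
{
	if (val < -128000) val = -128000;
	if (val > 127000)  val = 127000;
	return (s8)(val / 1000);	/* 200000 -> 127, the intended limit */
}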
virtual DMA on the Cyrix MediaGX @@ -426,6 +427,7 @@ config BLK_DEV_CS5520 config BLK_DEV_CS5530 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help Include support for UDMA on the Cyrix MediaGX 5530 chipset. This @@ -435,7 +437,7 @@ config BLK_DEV_CS5530 config BLK_DEV_CS5535 tristate "AMD CS5535 chipset support" - depends on X86 && !X86_64 + depends on X86_32 select BLK_DEV_IDEDMA_PCI help Include support for UDMA on the NSC/AMD CS5535 companion chipset. @@ -486,6 +488,7 @@ config BLK_DEV_JMICRON config BLK_DEV_SC1200 tristate "National SCx200 chipset support" + depends on X86_32 || COMPILE_TEST select BLK_DEV_IDEDMA_PCI help This driver adds support for the on-board IDE controller on the diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 2a744a91370e..a3d3b1733c49 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif) if (irq_handler == NULL) irq_handler = ide_intr; - if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) - goto out_up; + if (!host->get_lock) + if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) + goto out_up; #if !defined(__mc68000__) printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, @@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif) ide_proc_unregister_port(hwif); - free_irq(hwif->irq, hwif); + if (!hwif->host->get_lock) + free_irq(hwif->irq, hwif); device_unregister(hwif->portdev); device_unregister(&hwif->gendev); diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 17aeea170566..2a5fa9a436e5 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = { {6, 250000}, {1, 560000} }; +/* + * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048 + * The userspace interface uses m/s^2 and we declare micro units + * So scale factor is given by: + * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665 + */ static const int mma8452_scales[3][2] = { - {0, 977}, {0, 1953}, {0, 3906} + {0, 9577}, {0, 19154}, {0, 38307} }; static ssize_t mma8452_show_samp_freq_avail(struct device *dev, diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 258a973a1fb8..bfbf4d419f41 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev, &indio_dev->event_interface->dev_attr_list); kfree(postfix); + if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) + continue; + if (ret) return ret; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 5e153f6d4b48..768a0fb67dd6 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb) */ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) { + struct c4iw_ep *ep = handle; + printk(KERN_ERR MOD "ARP failure duing connect\n"); kfree_skb(skb); + connect_reply_upcall(ep, -EHOSTUNREACH); + state_set(&ep->com, DEAD); + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); + cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); + dst_release(ep->dst); + cxgb4_l2t_release(ep->l2t); + c4iw_put_ep(&ep->com); } /* @@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep) opt2 |= T5_OPT_2_VALID; opt2 |= 
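/*
 * Checking the new mma8452 scale table above against the formula in its
 * comment: scale = g * N * 1000000 / 2048 for full scales of +/-2, 4
 * and 8 g.  Stand-alone verification:
 */
#include <stdio.h>

int main(void)
{
	const double g = 9.80665;
	int n;

	for (n = 2; n <= 8; n *= 2)	/* prints 9577, 19154 and 38307 */
		printf("%d g: %.0f\n", n, g * n * 1000000.0 / 2048.0);
	return 0;
}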
V_CONG_CNTRL(CONG_ALG_TAHOE); } - t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); + t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { if (ep->com.remote_addr.ss_family == AF_INET) { @@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); BUG_ON(skb_cloned(skb)); skb_trim(skb, sizeof(struct cpl_tid_release)); - skb_get(skb); release_tid(&dev->rdev, hwtid, skb); return; } @@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void) return 0; } -void __exit c4iw_cm_term(void) +void c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); flush_workqueue(workq); diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index dd93aadc996e..7db82b24302b 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) pr_err(MOD "error allocating status page\n"); goto err4; } + rdev->status_page->db_off = 0; return 0; err4: c4iw_rqtpool_destroy(rdev); @@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx) if (ctx->dev->rdev.oc_mw_kva) iounmap(ctx->dev->rdev.oc_mw_kva); ib_dealloc_device(&ctx->dev->ibdev); - iwpm_exit(RDMA_NL_C4IW); ctx->dev = NULL; } @@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) setup_debugfs(devp); } - ret = iwpm_init(RDMA_NL_C4IW); - if (ret) { - pr_err("port mapper initialization failed with %d\n", ret); - ib_dealloc_device(&devp->ibdev); - return ERR_PTR(ret); - } return devp; } @@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void) pr_err("%s[%u]: Failed to add netlink callback\n" , __func__, __LINE__); + err = iwpm_init(RDMA_NL_C4IW); + if (err) { + pr_err("port mapper initialization failed with %d\n", err); + ibnl_remove_client(RDMA_NL_C4IW); + c4iw_cm_term(); + debugfs_remove_recursive(c4iw_debugfs_root); + return err; + } + cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); return 0; @@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void) } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); + iwpm_exit(RDMA_NL_C4IW); ibnl_remove_client(RDMA_NL_C4IW); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 125bc5d1e175..361fff7a0742 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); int c4iw_register_device(struct c4iw_dev *dev); void c4iw_unregister_device(struct c4iw_dev *dev); int __init c4iw_cm_init(void); -void __exit c4iw_cm_term(void); +void c4iw_cm_term(void); void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index d13ddf1c0033..bbbcf389272c 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, int err; uuari = &dev->mdev.priv.uuari; - if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) + if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) diff --git a/drivers/input/input.c b/drivers/input/input.c 
index 1c4c0db05550..29ca0bb4f561 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev, } static int input_get_disposition(struct input_dev *dev, - unsigned int type, unsigned int code, int value) + unsigned int type, unsigned int code, int *pval) { int disposition = INPUT_IGNORE_EVENT; + int value = *pval; switch (type) { @@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev, break; } + *pval = value; return disposition; } @@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev, { int disposition; - disposition = input_get_disposition(dev, type, code, value); + disposition = input_get_disposition(dev, type, code, &value); if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index 758b48731415..de7be4f03d91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c @@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int keyscan_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev) mutex_unlock(&input->mutex); return retval; } +#endif static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c index e4104f9b2e6d..fed5102e1802 100644 --- a/drivers/input/misc/sirfsoc-onkey.c +++ b/drivers/input/misc/sirfsoc-onkey.c @@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = { module_platform_driver(sirfsoc_pwrc_driver); -MODULE_LICENSE("GPLv2"); +MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); MODULE_ALIAS("platform:sirfsoc-pwrc"); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ec772d962f06..ef9e0b8a9aa7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = { 1232, 5710, 1156, 4696 }, { - (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, + (const char * const []){"LEN0034", "LEN0036", "LEN2002", + "LEN2004", NULL}, 1024, 5112, 2024, 4832 }, { @@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0049", "LEN2000", "LEN2001", /* Edge E431 */ - "LEN2002", + "LEN2002", /* Edge E531 */ "LEN2003", "LEN2004", /* L440 */ "LEN2005", diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 381b20d4c561..136b7b204f56 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Acer Aspire 5710 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"), + }, + }, + { /* Gericom Bellagio */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 977d05cd9e2e..e73cf2c71f35 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data) * 
a=(pi*r^2)/C. */ int a = data[5]; - int x_res = input_abs_get_res(input, ABS_X); - int y_res = input_abs_get_res(input, ABS_Y); - width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); + int x_res = input_abs_get_res(input, ABS_MT_POSITION_X); + int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y); + width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); height = width * y_res / x_res; } @@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev, input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); } else { - if (features->touch_max <= 2) { + if (features->touch_max == 1) { input_set_abs_params(input_dev, ABS_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, @@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case MTTPC: case MTTPC_B: case TABLETPC2FG: - if (features->device_type == BTN_TOOL_FINGER) { - unsigned int flags = INPUT_MT_DIRECT; - - if (wacom_wac->features.type == TABLETPC2FG) - flags = 0; - - input_mt_init_slots(input_dev, features->touch_max, flags); - } + if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1) + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT); /* fall through */ case TABLETPC: @@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_RIGHT, input_dev->keybit); if (features->touch_max) { - /* touch interface */ - unsigned int flags = INPUT_MT_POINTER; - - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, @@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); - } else { - __set_bit(BTN_TOOL_FINGER, input_dev->keybit); - __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); - flags = 0; } - input_mt_init_slots(input_dev, features->touch_max, flags); + input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER); } else { /* buttons/keys only interface */ __clear_bit(ABS_X, input_dev->absbit); diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 4e793a17361f..2ce649520fe0 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev, */ err = of_property_read_u32(node, "ti,coordinate-readouts", &ts_dev->coordinate_readouts); - if (err < 0) + if (err < 0) { + dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n"); err = of_property_read_u32(node, "ti,coordiante-readouts", &ts_dev->coordinate_readouts); + } + if (err < 0) return err; diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index b99dd88e31b9..bb446d742a2d 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn) static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) { /* Bug if not a power of 2 */ - BUG_ON(!is_power_of_2(addrspace_size)); + BUG_ON((addrspace_size & (addrspace_size - 1))); /* window size is 2^(WSE+1) bytes */ - return __ffs(addrspace_size) - 1; + return fls64(addrspace_size) - 2; } /* Derive the PAACE window count encoding for the subwindow count */ @@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size, struct paace 
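/*
 * The geometry behind the wacom_bpt3_touch_msg() hunk above: the tablet
 * reports a contact area "a" with a = (pi * r^2) / C, so the radius is
 * r = sqrt(a * C / pi); the driver folds C / pi into
 * WACOM_CONTACT_AREA_SCALE, giving width = 2 * sqrt(a * SCALE), then
 * corrects the height by the Y/X resolution ratio -- which is why the
 * fix reads the resolution from the MT axes this interface actually
 * registers.  Sketch; the scale constant's value here is an assumption,
 * not taken from the patch:
 */
#include <math.h>

#define CONTACT_AREA_SCALE 2607	/* assumed stand-in for WACOM_CONTACT_AREA_SCALE */

static void contact_size(int a, int x_res, int y_res, int *width, int *height)
{
	*width  = 2 * (int)sqrt((double)a * CONTACT_AREA_SCALE);
	*height = *width * y_res / x_res;	/* keep the physical aspect ratio */
}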
*ppaace; unsigned long fspi; - if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { + if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) { pr_debug("window size too small or not a power of two %llx\n", win_size); return -EINVAL; } @@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin, return -ENOENT; } - if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { + if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) { pr_debug("subwindow size out of range, or not a power of 2\n"); return -EINVAL; } diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 93072ba44b1d..af47648301a9 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova) * Size must be a power of two and at least be equal * to PAMU page size. */ - if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { + if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) { pr_debug("%s: size too small or not a power of two\n", __func__); return -EINVAL; } @@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void) return domain; } -static inline struct device_domain_info *find_domain(struct device *dev) -{ - return dev->archdata.iommu_domain; -} - static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) { unsigned long flags; @@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * Check here if the device is already attached to domain or not. * If the device is already attached to a domain detach it. */ - old_domain_info = find_domain(dev); + old_domain_info = dev->archdata.iommu_domain; if (old_domain_info && old_domain_info->domain != dma_domain) { spin_unlock_irqrestore(&device_domain_lock, flags); detach_device(dev, old_domain_info->domain); @@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * the info for the first LIODN as all * LIODNs share the same domain */ - if (!old_domain_info) + if (!dev->archdata.iommu_domain) dev->archdata.iommu_domain = info; spin_unlock_irqrestore(&device_domain_lock, flags); @@ -1042,12 +1037,15 @@ root_bus: group = get_shared_pci_device_group(pdev); } + if (!group) + group = ERR_PTR(-ENODEV); + return group; } static int fsl_pamu_add_device(struct device *dev) { - struct iommu_group *group = NULL; + struct iommu_group *group = ERR_PTR(-ENODEV); struct pci_dev *pdev; const u32 *prop; int ret, len; @@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev) group = get_device_iommu_group(dev); } - if (!group || IS_ERR(group)) + if (IS_ERR(group)) return PTR_ERR(group); ret = iommu_group_add_device(group, dev); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 7e11c9d6ae8c..7c131cf7cc13 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -42,6 +42,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqchip/arm-gic.h> +#include <asm/cputype.h> #include <asm/irq.h> #include <asm/exception.h> #include <asm/smp_plat.h> @@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, } for_each_possible_cpu(cpu) { - unsigned long offset = percpu_offset * cpu_logical_map(cpu); + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + unsigned long offset = percpu_offset * core_id; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 
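/*
 * Common thread of the fsl_pamu changes above: window sizes are
 * phys_addr_t values that can exceed 32 bits, while is_power_of_2() and
 * __ffs() operate on unsigned long, so both are replaced with an
 * open-coded power-of-two test and fls64() (presumably to keep large
 * physical windows working on 32-bit kernels).  The encoding itself:
 * window size is 2^(WSE+1) bytes, so for a power-of-two size
 * WSE = log2(size) - 1 = fls64(size) - 2.  Plain-C check:
 */
static unsigned int wse_for(unsigned long long size)
{
	/* fls64() equivalent; size must be a nonzero power of two */
	unsigned int msb = 64 - __builtin_clzll(size);

	return msb - 2;
}
/* wse_for(1ULL << 30) == 29, and 2^(29+1) == 1 GiB as required */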
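/*
 * Why the gic_init_bases() hunk above multiplies percpu_offset by
 * affinity level 0 instead of the raw cpu_logical_map() value: on
 * multi-cluster systems the logical map holds the full MPIDR, including
 * cluster bits in the upper affinity fields, which produced wildly
 * wrong banked-register offsets.  Sketch of the extraction, mirroring
 * the 8-bits-per-level layout used by MPIDR_AFFINITY_LEVEL():
 */
static unsigned int mpidr_affinity_level(unsigned int mpidr, unsigned int level)
{
	return (mpidr >> (level * 8)) & 0xff;
}
/* e.g. MPIDR 0x100 (cluster 1, core 0): level 0 yields 0, not 0x100 */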
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; } @@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) gic_cnt++; return 0; } +IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init); IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c index 0df6691d045c..8dc791bfaa6f 100644 --- a/drivers/isdn/hisax/l3ni1.c +++ b/drivers/isdn/hisax/l3ni1.c @@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic) memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ - if (ic->parm.ni1_io.timeout > 0) - if (!(pc = ni1_new_l3_process(st, -1))) - { free_invoke_id(st, id); + if (ic->parm.ni1_io.timeout > 0) { + pc = ni1_new_l3_process(st, -1); + if (!pc) { + free_invoke_id(st, id); return (-2); } - pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ - pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ + /* remember id */ + pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; + /* and procedure */ + pc->prot.ni1.proc = ic->parm.ni1_io.proc; + } if (!(skb = l3_alloc_skb(l))) { free_invoke_id(st, id); diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 61ac63237446..62f0688d45a5 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; - int len, err; + int len; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; @@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p) if (IS_ERR(code)) return PTR_ERR(code); - err = sk_chk_filter(code, uprog.len); - if (err) { - kfree(code); - return err; - } - *p = code; return uprog.len; } @@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) fprog.len = len; fprog.filter = code; - if (is->pass_filter) + if (is->pass_filter) { sk_unattached_filter_destroy(is->pass_filter); - err = sk_unattached_filter_create(&is->pass_filter, &fprog); + is->pass_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&is->pass_filter, + &fprog); + else + err = 0; kfree(code); return err; @@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) fprog.len = len; fprog.filter = code; - if (is->active_filter) + if (is->active_filter) { sk_unattached_filter_destroy(is->active_filter); - err = sk_unattached_filter_create(&is->active_filter, &fprog); + is->active_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&is->active_filter, + &fprog); + else + err = 0; kfree(code); return err; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 4ead4ba60656..d2899e7eb3aa 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd) disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) { + DMERR("changing 
the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk_super->data_block_size), + (unsigned long long)cmd->data_block_size); + r = -EINVAL; + goto bad; + } + r = __check_incompat_features(disk_super, cmd); if (r < 0) goto bad; diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index b086a945edcb..e9d33ad59df5 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd) disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk_super->data_block_size), + (unsigned long long)pmd->data_block_size); + r = -EINVAL; + goto bad_unlock_sblock; + } + r = __check_incompat_features(disk_super, pmd); if (r < 0) goto bad_unlock_sblock; diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 8637d2ed7623..2e3cdcfa0a67 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd) jiffies_to_msecs(jiffies) - (jiffies_to_msecs(timeout) - TIMEOUT)); - if (!(cmd->args[0] >> 7) & 0x01) { + if (!((cmd->args[0] >> 7) & 0x01)) { ret = -ETIMEDOUT; goto err_mutex_unlock; } @@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; - cmd.args[0] = 0x05; - cmd.args[1] = 0x00; - cmd.args[2] = 0xaa; - cmd.args[3] = 0x4d; - cmd.args[4] = 0x56; - cmd.args[5] = 0x40; - cmd.args[6] = 0x00; - cmd.args[7] = 0x00; - cmd.wlen = 8; - cmd.rlen = 1; - ret = si2168_cmd_execute(s, &cmd); - if (ret) - goto err; - /* cold state - try to download firmware */ dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", KBUILD_MODNAME, si2168_ops.info.name); diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h index 2a343e896f40..53f7f06ae343 100644 --- a/drivers/media/dvb-frontends/si2168_priv.h +++ b/drivers/media/dvb-frontends/si2168_priv.h @@ -22,7 +22,7 @@ #include <linux/firmware.h> #include <linux/i2c-mux.h> -#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" +#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw" /* state struct */ struct si2168 { diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 522fe00f5eee..9619be5d4827 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u8 mode, rolloff, pilot, inversion, div; + fe_modulation_t modulation; dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", @@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) switch (c->delivery_system) { case SYS_DVBS: + modulation = QPSK; rolloff = 0; pilot = 2; break; case SYS_DVBS2: + modulation = c->modulation; + switch (c->rolloff) { case ROLLOFF_20: rolloff = 2; @@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe) for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && - c->modulation == TDA10071_MODCOD[i].modulation && + modulation == 
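/*
 * The si2168 fix above (and the matching si2157 one below) is a classic
 * precedence bug: "!" binds tighter than "&", so "!(x >> 7) & 0x01"
 * parses as "(!(x >> 7)) & 1".  For these u8 status bytes the two forms
 * happen to agree, since x >> 7 is already 0 or 1, but the pattern
 * breaks as soon as the shifted value can hold more than one bit.
 * Demonstration:
 */
#include <assert.h>

static void precedence_demo(void)
{
	unsigned int x = 0x100;	/* bit 8 set, bit 7 (the ready bit) clear */

	assert((!(x >> 7) & 0x01) == 0);	/* buggy form: looks "ready"  */
	assert((!((x >> 7) & 0x01)) == 1);	/* fixed form: flags timeout  */
}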
TDA10071_MODCOD[i].modulation && c->fec_inner == TDA10071_MODCOD[i].fec) { mode = TDA10071_MODCOD[i].val; dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", @@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe) switch ((buf[1] >> 0) & 0x01) { case 0: - c->inversion = INVERSION_OFF; + c->inversion = INVERSION_ON; break; case 1: - c->inversion = INVERSION_ON; + c->inversion = INVERSION_OFF; break; } @@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe) if (ret) goto error; - c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); + c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000; return ret; error: diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h index 4baf14bfb65a..420486192736 100644 --- a/drivers/media/dvb-frontends/tda10071_priv.h +++ b/drivers/media/dvb-frontends/tda10071_priv.h @@ -55,6 +55,7 @@ static struct tda10071_modcod { { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, /* 8PSK */ + { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 }, { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index e65c760e4e8b..0006d6bf8c18 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops = .read = vb2_fop_read, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops ts_ioctl_ops = { diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index a7ed16497903..1e4ec697fb10 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -269,6 +269,7 @@ err: list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } + spin_unlock_irqrestore(&common->irqlock, flags); return ret; } diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 5bb085b19bcb..b431b58f39e3 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -233,6 +233,7 @@ err: list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); } + spin_unlock_irqrestore(&common->irqlock, flags); return ret; } diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 271a752cee54..fa4cc7b880aa 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd) jiffies_to_msecs(jiffies) - (jiffies_to_msecs(timeout) - TIMEOUT)); - if (!(buf[0] >> 7) & 0x01) { + if (!((buf[0] >> 7) & 0x01)) { ret = -ETIMEDOUT; goto err_mutex_unlock; } else { diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 021e4d35e4d7..7b9b75f60774 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d) if (ret < 0) goto err; - if (tmp == 0x00) - dev_dbg(&d->udev->dev, - "%s: [%d]tuner not set, using default\n", - __func__, i); - else + dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", + __func__, i, tmp); + + /* tuner sanity check */ + if (state->chip_type == 
0x9135) { + if (state->chip_version == 0x02) { + /* IT9135 BX (v2) */ + switch (tmp) { + case AF9033_TUNER_IT9135_60: + case AF9033_TUNER_IT9135_61: + case AF9033_TUNER_IT9135_62: + state->af9033_config[i].tuner = tmp; + break; + } + } else { + /* IT9135 AX (v1) */ + switch (tmp) { + case AF9033_TUNER_IT9135_38: + case AF9033_TUNER_IT9135_51: + case AF9033_TUNER_IT9135_52: + state->af9033_config[i].tuner = tmp; + break; + } + } + } else { + /* AF9035 */ state->af9033_config[i].tuner = tmp; + } - dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", - __func__, i, state->af9033_config[i].tuner); + if (state->af9033_config[i].tuner != tmp) { + dev_info(&d->udev->dev, + "%s: [%d] overriding tuner from %02x to %02x\n", + KBUILD_MODNAME, i, tmp, + state->af9033_config[i].tuner); + } switch (state->af9033_config[i].tuner) { case AF9033_TUNER_TUA9001: diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c index 2fd1c5e31a0f..339adce7c7a5 100644 --- a/drivers/media/usb/gspca/pac7302.c +++ b/drivers/media/usb/gspca/pac7302.c @@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x093a, 0x2620)}, {USB_DEVICE(0x093a, 0x2621)}, {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2625)}, {USB_DEVICE(0x093a, 0x2626)}, diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 0500c4175d5f..6bce01a674f9 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb) } /*=========================================================================*/ -/* bufffer bits */ +/* buffer bits */ /* function expects dev->io_mutex to be hold by caller */ int hdpvr_cancel_queue(struct hdpvr_device *dev) @@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_AUDIO_ENCODING: if (dev->flags & HDPVR_FLAG_AC3_CAP) { opt->audio_codec = ctrl->val; - return hdpvr_set_audio(dev, opt->audio_input, + return hdpvr_set_audio(dev, opt->audio_input + 1, opt->audio_codec); } return 0; @@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_AUDIO_ENCODING, ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, - 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); + 0x7, ac3 ? 
dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC); v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index 4ae54caadd03..ce1c9f5d9dee 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait) aspect.denominator = 9; } else if (ratio == 34) { aspect.numerator = 4; - aspect.numerator = 3; + aspect.denominator = 3; } else if (ratio == 68) { aspect.numerator = 15; - aspect.numerator = 9; + aspect.denominator = 9; } else { aspect.numerator = hor_landscape + 99; aspect.denominator = 100; diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index e4ec355704a6..a7543ba3e190 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -52,6 +52,11 @@ /* Atmel chips */ #define AT49BV640D 0x02de #define AT49BV640DT 0x02db +/* Sharp chips */ +#define LH28F640BFHE_PTTL90 0x00b0 +#define LH28F640BFHE_PBTL90 0x00b1 +#define LH28F640BFHE_PTTL70A 0x00b2 +#define LH28F640BFHE_PBTL70A 0x00b3 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd) (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; }; +static int is_LH28F640BF(struct cfi_private *cfi) +{ + /* Sharp LH28F640BF Family */ + if (cfi->mfr == CFI_MFR_SHARP && ( + cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 || + cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A)) + return 1; + return 0; +} + +static void fixup_LH28F640BF(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; + + /* Reset the Partition Configuration Register on LH28F640BF + * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */ + if (is_LH28F640BF(cfi)) { + printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n"); + map_write(map, CMD(0x60), 0); + map_write(map, CMD(0x04), 0); + + /* We have set one single partition thus + * Simultaneous Operations are not allowed */ + printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n"); + extp->FeatureSupport &= ~512; + } +} + static void fixup_use_point(struct mtd_info *mtd) { struct map_info *map = mtd->priv; @@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = { { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF }, { 0, 0, NULL } }; @@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, initial_adr = adr; cmd_adr = adr & ~(wbufsize-1); + /* Sharp LH28F640BF chips need the first address for the + * Page Buffer Program command. See Table 5 of + * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */ + if (is_LH28F640BF(cfi)) + cmd_adr = adr; + /* Let's determine this according to the interleave only once */ write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? 
CMD(0xe8) : CMD(0xe9); diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c index 7df86948e6d4..b4f61c7fc161 100644 --- a/drivers/mtd/devices/elm.c +++ b/drivers/mtd/devices/elm.c @@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info) ELM_SYNDROME_FRAGMENT_1 + offset); regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset); + break; default: return -EINVAL; } @@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info) regs->elm_syndrome_fragment_1[i]); elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, regs->elm_syndrome_fragment_0[i]); + break; default: return -EINVAL; } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 41167e9e991e..4f3e80c68a26 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd) ecc->layout->oobavail += ecc->layout->oobfree[i].length; mtd->oobavail = ecc->layout->oobavail; - /* ECC sanity check: warn noisily if it's too weak */ - WARN_ON(!nand_ecc_strength_good(mtd)); + /* ECC sanity check: warn if it's too weak */ + if (!nand_ecc_strength_good(mtd)) + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", + mtd->name); /* * Set the number of read / write steps for one page depending on ECC diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index b04e7d059888..0431b46d9fd9 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); - if (vol_id < av->vol_id) + if (vol_id > av->vol_id) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, pnum, err); ret = err > 0 ? 
UBI_BAD_FASTMAP : err; goto out; - } else if (ret == UBI_IO_BITFLIPS) + } else if (err == UBI_IO_BITFLIPS) scrub = 1; /* diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3a451b6cd3d5..701f86cd5993 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond_params *params) } if (ad_select) { - bond_opt_initstr(&newval, lacp_rate); + bond_opt_initstr(&newval, ad_select); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), &newval); if (!valptr) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 141160ef249a..5776e503e4c5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) work_done = bcm_sysport_tx_reclaim(ring->priv, ring); - if (work_done < budget) { + if (work_done == 0) { napi_complete(napi); /* re-enable TX interrupt */ intrl2_1_mask_clear(ring->priv, BIT(ring->index)); } - return work_done; + return 0; } static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) @@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv, usleep_range(1000, 2000); } -static inline int umac_reset(struct bcm_sysport_priv *priv) +static inline void umac_reset(struct bcm_sysport_priv *priv) { - unsigned int timeout = 0; u32 reg; - int ret = 0; - - umac_writel(priv, 0, UMAC_CMD); - while (timeout++ < 1000) { - reg = umac_readl(priv, UMAC_CMD); - if (!(reg & CMD_SW_RESET)) - break; - - udelay(1); - } - - if (timeout == 1000) { - dev_err(&priv->pdev->dev, - "timeout waiting for MAC to come out of reset\n"); - ret = -ETIMEDOUT; - } - return ret; + reg = umac_readl(priv, UMAC_CMD); + reg |= CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); + udelay(10); + reg = umac_readl(priv, UMAC_CMD); + reg &= ~CMD_SW_RESET; + umac_writel(priv, reg, UMAC_CMD); } static void umac_set_hw_addr(struct bcm_sysport_priv *priv, @@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev) int ret; /* Reset UniMAC */ - ret = umac_reset(priv); - if (ret) { - netdev_err(dev, "UniMAC reset failed\n"); - return ret; - } + umac_reset(priv); /* Flush TX and RX FIFOs at TOPCTRL level */ topctrl_flush(priv); @@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev) BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); dev->needed_headroom += sizeof(struct bcm_tsb); - /* We are interfaced to a switch which handles the multicast - * filtering for us, so we do not support programming any - * multicast hash table in this Ethernet MAC. 
- */ - dev->flags &= ~IFF_MULTICAST; - /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 47c5814114e1..4b875da1c7ed 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, return; } - bnx2x_frag_free(fp, new_data); + if (new_data) + bnx2x_frag_free(fp, new_data); drop: /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2887034523e0..6a8b1453a1b9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) * without the default SB. * For VFs there is no default SB, then we return (index+1). */ - pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control); index = control & PCI_MSIX_FLAGS_QSIZE; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 5ba1cfbd60da..16281ad2da12 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) if (cb->skb) continue; - /* set the DMA descriptor length once and for all - * it will only change if we support dynamically sizing - * priv->rx_buf_len, but we do not - */ - dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr, - priv->rx_buf_len << DMA_BUFLENGTH_SHIFT); - ret = bcmgenet_rx_refill(priv, cb); if (ret) break; @@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev) netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); - err = register_netdev(dev); - if (err) - goto err_clk_disable; + /* libphy will determine the link state */ + netif_carrier_off(dev); /* Turn off the main clock, WOL clock is handled separately */ if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); + err = register_netdev(dev); + if (err) + goto err; + return err; err_clk_disable: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0f117105fed1..e23c993b1362 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -331,9 +331,9 @@ struct bcmgenet_mib_counters { #define EXT_ENERGY_DET_MASK (1 << 12) #define EXT_RGMII_OOB_CTRL 0x0C -#define RGMII_MODE_EN (1 << 0) #define RGMII_LINK (1 << 4) #define OOB_DISABLE (1 << 5) +#define RGMII_MODE_EN (1 << 6) #define ID_MODE_DIS (1 << 16) #define EXT_GPHY_CTRL 0x1C diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 34a26e42f19d..1e187fb760f8 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev) for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); be_enable_busy_poll(eqo); - be_eq_notify(adapter, eqo->q.id, true, false, 0); + be_eq_notify(adapter, eqo->q.id, 
true, true, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index fab39e295441..36fc429298e3 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) if (ug_info->rxExtendedFiltering) { size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; if (ug_info->largestexternallookupkeysize == - QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) + QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES) size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a2db388cc31e..ee74f9536b31 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) s32 ret_val; u16 i, rar_count = mac->rar_entry_count; + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + /* Initialize identification LED */ ret_val = igb_id_led_init(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 2a8bb35c2df2..217f8138851b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -46,14 +46,15 @@ #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ /* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 /* Interrupt delay cancellation */ /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 @@ -62,6 +63,7 @@ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 #define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_OPCODE_READ 0x08000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 89925e405849..ce55ea5d750c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw); /* These functions must be implemented by drivers */ s32 
igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); #endif /* _E1000_HW_H_ */ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 337161f440dd..65d931669f81 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw) } return ret_val; } + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 9f34976687ba..3442b6357d01 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data); s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE { #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define 
E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 1cc4b1a7e597..f5ba4e4eafb9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -66,6 +66,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f145adbb55ac..a9537ba7a5a0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; @@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev) if (netif_running(netdev)) igb_close(netdev); + else + igb_reset(adapter); igb_clear_interrupt_scheme(adapter); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 45beca17fa50..dadd9a5f6323 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto, command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; - if (l3_proto == swab16(ETH_P_IP)) + if (l3_proto == htons(ETH_P_IP)) command |= MVNETA_TXD_IP_CSUM; else command |= MVNETA_TX_L3_IP6; @@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev) if (phydev->speed == SPEED_1000) val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else + else if (phydev->speed == SPEED_100) val |= MVNETA_GMAC_CONFIG_MII_SPEED; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 80f725228f5b..56022d647837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, init_completion(&cq->free); cq->irq = priv->eq_table.eq[cq->vector].irq; - cq->irq_affinity_change = false; - return 0; err_radix: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 4b2130760eed..82322b1c8411 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", name); } + } } else { cq->vector = (cq->ring + 1 + 
priv->port) % mdev->dev->caps.num_comp_vectors; } + + cq->irq_desc = + irq_to_desc(mlx4_eq_get_irq(mdev->dev, + cq->vector)); } else { /* For TX we use the same irq per ring we assigned for the RX */ @@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (priv->mdev->dev->caps.comp_pool && cq->vector) { - if (!cq->is_tx) - irq_set_affinity_hint(cq->mcq.irq, NULL); mlx4_release_eq(priv->mdev->dev, cq->vector); } cq->vector = 0; @@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (!cq->is_tx) { napi_hash_del(&cq->napi); synchronize_rcu(); + irq_set_affinity_hint(cq->mcq.irq, NULL); } netif_napi_del(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index fa1a069e14e6..68d763d2d030 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev, coal->tx_coalesce_usecs = priv->tx_usecs; coal->tx_max_coalesced_frames = priv->tx_frames; + coal->tx_max_coalesced_frames_irq = priv->tx_work_limit; + coal->rx_coalesce_usecs = priv->rx_usecs; coal->rx_max_coalesced_frames = priv->rx_frames; @@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev, coal->rx_coalesce_usecs_high = priv->rx_usecs_high; coal->rate_sample_interval = priv->sample_interval; coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; + return 0; } @@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev, { struct mlx4_en_priv *priv = netdev_priv(dev); + if (!coal->tx_max_coalesced_frames_irq) + return -EINVAL; + priv->rx_frames = (coal->rx_max_coalesced_frames == MLX4_EN_AUTO_CONF) ? 
MLX4_EN_RX_COAL_TARGET : @@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev, priv->rx_usecs_high = coal->rx_coalesce_usecs_high; priv->sample_interval = coal->rate_sample_interval; priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + priv->tx_work_limit = coal->tx_max_coalesced_frames_irq; return mlx4_en_moderation_update(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 7d4fb7bf2593..7345c43b019e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); __be16 current_port; - if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; if (sa_family == AF_INET6) @@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, MLX4_WQE_CTRL_SOLICITED); priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; priv->tx_ring_num = prof->tx_ring_num; + priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index d2d415732d99..5535862f27cc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -40,6 +40,7 @@ #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> +#include <linux/irq.h> #include "mlx4_en.h" @@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); + skb_mark_napi_id(gro_skb, &cq->napi); if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { timestamp = mlx4_en_get_cqe_ts(cqe); @@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) /* If we used up all the quota - we're probably not done yet... */ if (done == budget) { + int cpu_curr; + const struct cpumask *aff; + INC_PERF_COUNTER(priv->pstats.napi_quota); - if (unlikely(cq->mcq.irq_affinity_change)) { - cq->mcq.irq_affinity_change = false; + + cpu_curr = smp_processor_id(); + aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + + if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) { + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. 
need to stop this NAPI + * poll, and restart it on the right CPU + */ napi_complete(napi); mlx4_en_arm_cq(priv, cq); return 0; } } else { /* Done for now */ - cq->mcq.irq_affinity_change = false; napi_complete(napi); mlx4_en_arm_cq(priv, cq); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8be7483f8236..5045bab59633 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) return cnt; } -static int mlx4_en_process_tx_cq(struct net_device *dev, - struct mlx4_en_cq *cq, - int budget) +static bool mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; @@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev, int factor = priv->cqe_factor; u64 timestamp = 0; int done = 0; + int budget = priv->tx_work_limit; if (!priv->port_up) - return 0; + return true; index = cons_index & size_mask; cqe = &buf[(index << factor) + factor]; @@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev, netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; } - return done; + return done < budget; } void mlx4_en_tx_irq(struct mlx4_cq *mcq) @@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); struct net_device *dev = cq->dev; struct mlx4_en_priv *priv = netdev_priv(dev); - int done; + int clean_complete; - done = mlx4_en_process_tx_cq(dev, cq, budget); + clean_complete = mlx4_en_process_tx_cq(dev, cq); + if (!clean_complete) + return budget; - /* If we used up all the quota - we're probably not done yet... 
*/ - if (done < budget) { - /* Done for now */ - cq->mcq.irq_affinity_change = false; - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return done; - } else if (unlikely(cq->mcq.irq_affinity_change)) { - cq->mcq.irq_affinity_change = false; - napi_complete(napi); - mlx4_en_arm_cq(priv, cq); - return 0; - } - return budget; + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + + return 0; } static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index d954ec1eac17..2a004b347e1d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -53,11 +53,6 @@ enum { MLX4_EQ_ENTRY_SIZE = 0x20 }; -struct mlx4_irq_notify { - void *arg; - struct irq_affinity_notify notify; -}; - #define MLX4_EQ_STATUS_OK ( 0 << 28) #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) #define MLX4_EQ_OWNER_SW ( 0 << 24) @@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev) iounmap(priv->clr_base); } -static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct mlx4_irq_notify *n = container_of(notify, - struct mlx4_irq_notify, - notify); - struct mlx4_priv *priv = (struct mlx4_priv *)n->arg; - struct radix_tree_iter iter; - void **slot; - - radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) { - struct mlx4_cq *cq = (struct mlx4_cq *)(*slot); - - if (cq->irq == notify->irq) - cq->irq_affinity_change = true; - } -} - -static void mlx4_release_irq_notifier(struct kref *ref) -{ - struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify, - notify.kref); - kfree(n); -} - -static void mlx4_assign_irq_notifier(struct mlx4_priv *priv, - struct mlx4_dev *dev, int irq) -{ - struct mlx4_irq_notify *irq_notifier = NULL; - int err = 0; - - irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL); - if (!irq_notifier) { - mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n", - irq); - return; - } - - irq_notifier->notify.irq = irq; - irq_notifier->notify.notify = mlx4_irq_notifier_notify; - irq_notifier->notify.release = mlx4_release_irq_notifier; - irq_notifier->arg = priv; - err = irq_set_affinity_notifier(irq, &irq_notifier->notify); - if (err) { - kfree(irq_notifier); - irq_notifier = NULL; - mlx4_warn(dev, "Failed to set irq notifier. 
irq %d\n", irq); - } -} - - int mlx4_alloc_eq_table(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, continue; /*we dont want to break here*/ } - mlx4_assign_irq_notifier(priv, dev, - priv->eq_table.eq[vec].irq); eq_set_ci(&priv->eq_table.eq[vec], 1); } @@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, } EXPORT_SYMBOL(mlx4_assign_eq); +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->eq_table.eq[vec].irq; +} +EXPORT_SYMBOL(mlx4_eq_get_irq); + void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec) Belonging to a legacy EQ*/ mutex_lock(&priv->msix_ctl.pool_lock); if (priv->msix_ctl.pool_bm & 1ULL << i) { - irq_set_affinity_notifier( - priv->eq_table.eq[vec].irq, - NULL); free_irq(priv->eq_table.eq[vec].irq, &priv->eq_table.eq[vec]); priv->msix_ctl.pool_bm &= ~(1ULL << i); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 0e15295bedd6..d72a5a894fc6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -126,6 +126,8 @@ enum { #define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ MLX4_EN_NUM_UP) +#define MLX4_EN_DEFAULT_TX_WORK 256 + /* Target number of packets to coalesce with interrupt moderation */ #define MLX4_EN_RX_COAL_TARGET 44 #define MLX4_EN_RX_COAL_TIME 0x10 @@ -343,6 +345,7 @@ struct mlx4_en_cq { #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) spinlock_t poll_lock; /* protects from LLS/napi conflicts */ #endif /* CONFIG_NET_RX_BUSY_POLL */ + struct irq_desc *irq_desc; }; struct mlx4_en_port_profile { @@ -542,6 +545,7 @@ struct mlx4_en_priv { __be32 ctrl_flags; u32 flags; u8 num_tx_rings_p_up; + u32 tx_work_limit; u32 tx_ring_num; u32 rx_ring_num; u32 rx_skb_size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index ba0401d4af50..184c3615f479 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, write_lock_irq(&table->lock); err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); write_unlock_irq(&table->lock); + if (err) { + mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n", + mlx5_base_mkey(mr->key), err); + mlx5_core_destroy_mkey(dev, mr); + } return err; } @@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) struct mlx5_mr_table *table = &dev->priv.mr_table; struct mlx5_destroy_mkey_mbox_in in; struct mlx5_destroy_mkey_mbox_out out; + struct mlx5_core_mr *deleted_mr; unsigned long flags; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); + write_lock_irqsave(&table->lock, flags); + deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); + write_unlock_irqrestore(&table->lock, flags); + if (!deleted_mr) { + mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", + mlx5_base_mkey(mr->key)); + return -ENOENT; + } + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); @@ -119,10 
+134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr) if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); - write_lock_irqsave(&table->lock, flags); - radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key)); - write_unlock_irqrestore(&table->lock, flags); - return err; } EXPORT_SYMBOL(mlx5_core_destroy_mkey); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index be425ad5e824..61623e9af574 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -538,6 +538,7 @@ enum rtl_register_content { MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ /* Config4 register */ @@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_40: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: @@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev) PCI_EXP_LNKCTL_CLKREQ_EN); } +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 data; + + data = RTL_R8(Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(Config3, data); +} + #define R8168_CPCMD_QUIRK_MASK (\ EnableBist | \ Mac_dbgo_oe | \ @@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) }; rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); @@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) @@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) @@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8106(struct rtl8169_private *tp) @@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); } static void rtl_hw_start_8101(struct net_device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index b3e148ef5683..9d3748361a1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw) static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) { - u32 value; - - value = readl(ioaddr + GMAC_AN_CTRL); /* auto negotiation enable and External Loopback enable */ - value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; if (restart) value |= GMAC_AN_CTRL_RAN; diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7e6628a91514..1e2bcf5f89e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, x->rx_msg_type_delay_req++; else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) x->rx_msg_type_delay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) + else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ) x->rx_msg_type_pdelay_req++; else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) x->rx_msg_type_pdelay_resp++; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 1c24a8f368bd..fd411d6e19a2 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac) return vp; } +static void vnet_cleanup(void) +{ + struct vnet *vp; + struct net_device *dev; + + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + dev = vp->dev; + /* vio_unregister_driver() should have cleaned up port_list */ + BUG_ON(!list_empty(&vp->port_list)); + unregister_netdev(dev); + free_netdev(dev); + } + mutex_unlock(&vnet_list_mutex); +} + static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, @@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev) kfree(port); - unregister_netdev(vp->dev); } return 0; } @@ -1268,6 +1285,7 @@ static int __init vnet_init(void) static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); + vnet_cleanup(); } module_init(vnet_init); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index eb78203cd58e..2aa57270838f 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type); static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); static void dfx_rcv_queue_process(DFX_board_t *bp); +#ifdef DYNAMIC_BUFFERS static void dfx_rcv_flush(DFX_board_t *bp); +#else +static inline void dfx_rcv_flush(DFX_board_t *bp) {} +#endif static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev); @@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) * Align an sk_buff to a boundary power of 2 * */ - +#ifdef DYNAMIC_BUFFERS static void my_skb_align(struct sk_buff *skb, int n) { unsigned long x = (unsigned long)skb->data; @@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n) skb_reserve(skb, v - x); } - +#endif /* * ================ @@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process( break; } else { -#ifndef DYNAMIC_BUFFERS - if (! 
rx_in_place) -#endif - { + if (!rx_in_place) { /* Receive buffer allocated, pass receive packet up */ skb_copy_to_linear_data(skb, @@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp ) } } -#else -static inline void dfx_rcv_flush( DFX_board_t *bp ) -{ -} #endif /* DYNAMIC_BUFFERS */ /* diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 6a999e6814a0..9408157a246c 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, { struct dp83640_private *dp83640 = phydev->priv; - if (!dp83640->hwts_rx_en) - return false; - if (is_status_frame(skb, type)) { decode_status_frame(dp83640, skb); kfree_skb(skb); return true; } + if (!dp83640->hwts_rx_en) + return false; + SKB_PTP_TYPE(skb) = type; skb_queue_tail(&dp83640->rx_queue, skb); schedule_work(&dp83640->ts_work); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2e58aa54484c..4eaadcfcb0fe 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); + +/* Walk the list of subnodes of a mdio bus and look for a node that matches the + * phy's address with its 'reg' property. If found, set the of_node pointer for + * the phy. This allows auto-probed pyh devices to be supplied with information + * passed in via DT. + */ +static void of_mdiobus_link_phydev(struct mii_bus *mdio, + struct phy_device *phydev) +{ + struct device *dev = &phydev->dev; + struct device_node *child; + + if (dev->of_node || !mdio->dev.of_node) + return; + + for_each_available_child_of_node(mdio->dev.of_node, child) { + int addr; + int ret; + + ret = of_property_read_u32(child, "reg", &addr); + if (ret < 0) { + dev_err(dev, "%s has invalid PHY address\n", + child->full_name); + continue; + } + + /* A PHY must have a reg property in the range [0-31] */ + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "%s PHY address %i is too large\n", + child->full_name, addr); + continue; + } + + if (addr == phydev->addr) { + dev->of_node = child; + return; + } + } +} +#else /* !IS_ENABLED(CONFIG_OF_MDIO) */ +static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, + struct phy_device *phydev) +{ +} #endif /** diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 91d6c1272fcf..d5b77ef3a210 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p) { struct sock_fprog uprog; struct sock_filter *code = NULL; - int len, err; + int len; if (copy_from_user(&uprog, arg, sizeof(uprog))) return -EFAULT; @@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p) if (IS_ERR(code)) return PTR_ERR(code); - err = sk_chk_filter(code, uprog.len); - if (err) { - kfree(code); - return err; - } - *p = code; return uprog.len; } @@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) }; ppp_lock(ppp); - if (ppp->pass_filter) + if (ppp->pass_filter) { sk_unattached_filter_destroy(ppp->pass_filter); - err = sk_unattached_filter_create(&ppp->pass_filter, - &fprog); + ppp->pass_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&ppp->pass_filter, + &fprog); + else + err = 0; kfree(code); ppp_unlock(ppp); } @@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, 
unsigned int cmd, unsigned long arg) }; ppp_lock(ppp); - if (ppp->active_filter) + if (ppp->active_filter) { sk_unattached_filter_destroy(ppp->active_filter); - err = sk_unattached_filter_create(&ppp->active_filter, - &fprog); + ppp->active_filter = NULL; + } + if (fprog.filter != NULL) + err = sk_unattached_filter_create(&ppp->active_filter, + &fprog); + else + err = 0; kfree(code); ppp_unlock(ppp); } diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2ea7efd11857..6c9c16d76935 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index a3a05869309d..a4272ed62da8 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -258,10 +258,8 @@ struct hso_serial { * so as not to drop characters on the floor. */ int curr_rx_urb_idx; - u16 curr_rx_urb_offset; u8 rx_urb_filled[MAX_RX_URBS]; struct tasklet_struct unthrottle_tasklet; - struct work_struct retry_unthrottle_workqueue; }; struct hso_device { @@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty) tasklet_hi_schedule(&serial->unthrottle_tasklet); } -static void hso_unthrottle_workfunc(struct work_struct *work) -{ - struct hso_serial *serial = - container_of(work, struct hso_serial, - retry_unthrottle_workqueue); - hso_unthrottle_tasklet(serial); -} - /* open the requested serial port */ static int hso_serial_open(struct tty_struct *tty, struct file *filp) { @@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) tasklet_init(&serial->unthrottle_tasklet, (void (*)(unsigned long))hso_unthrottle_tasklet, (unsigned long)serial); - INIT_WORK(&serial->retry_unthrottle_workqueue, - hso_unthrottle_workfunc); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); @@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) if (!usb_gone) hso_stop_serial_device(serial->parent); tasklet_kill(&serial->unthrottle_tasklet); - cancel_work_sync(&serial->retry_unthrottle_workqueue); } if (!usb_gone) @@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb) static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) { struct tty_struct *tty; - int write_length_remaining = 0; - int curr_write_len; + int count; /* Sanity check */ if (urb == NULL || serial == NULL) { @@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) tty = tty_port_tty_get(&serial->port); + if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { + tty_kref_put(tty); + return -1; + } + /* Push data to tty */ - write_length_remaining = urb->actual_length - - serial->curr_rx_urb_offset; D1("data to push to tty"); - while (write_length_remaining) { - if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { - tty_kref_put(tty); - return -1; - } - curr_write_len = tty_insert_flip_string(&serial->port, - urb->transfer_buffer + serial->curr_rx_urb_offset, - write_length_remaining); - serial->curr_rx_urb_offset += curr_write_len; - write_length_remaining -= curr_write_len; + count = tty_buffer_request_room(&serial->port, urb->actual_length); + if (count >= urb->actual_length) { + 
tty_insert_flip_string(&serial->port, urb->transfer_buffer, + urb->actual_length); tty_flip_buffer_push(&serial->port); + } else { + dev_warn(&serial->parent->usb->dev, + "dropping data, %d bytes lost\n", urb->actual_length); } + tty_kref_put(tty); - if (write_length_remaining == 0) { - serial->curr_rx_urb_offset = 0; - serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; - } - return write_length_remaining; + serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; + + return 0; } @@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) } } serial->curr_rx_urb_idx = 0; - serial->curr_rx_urb_offset = 0; if (serial->tx_urb) usb_kill_urb(serial->tx_urb); diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 5d95a13dbe2a..735f7dadb9a0 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), .driver_info = (unsigned long)&huawei_cdc_ncm_info, }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16), + .driver_info = (unsigned long)&huawei_cdc_ncm_info, + }, /* Terminating entry */ { diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cf62d7e8329f..22756db53dca 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -667,6 +667,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ @@ -741,6 +742,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ @@ -756,6 +758,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ + {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 25431965a625..7bad2d316637 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct sk_buff_head seg_list; struct sk_buff *segs, *nskb; - features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); if (IS_ERR(segs) || !segs) goto drop; @@ -3204,8 +3204,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev, struct r8152 *tp = netdev_priv(dev); struct tally_counter tally; + if (usb_autopm_get_interface(tp->intf) < 0) + return; + generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), 
&tally, MCU_TYPE_PLA); + usb_autopm_put_interface(tp->intf); + data[0] = le64_to_cpu(tally.tx_packets); data[1] = le64_to_cpu(tally.rx_packets); data[2] = le64_to_cpu(tally.tx_errors); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 424db65e4396..d07bf4cb893f 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf) return ret; } +static int smsc95xx_reset_resume(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + int ret; + + ret = smsc95xx_reset(dev); + if (ret < 0) + return ret; + + return smsc95xx_resume(intf); +} + static void smsc95xx_rx_csum_offload(struct sk_buff *skb) { skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); @@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = { .probe = usbnet_probe, .suspend = smsc95xx_suspend, .resume = smsc95xx_resume, - .reset_resume = smsc95xx_resume, + .reset_resume = smsc95xx_reset_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, .supports_autosuspend = 1, diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 93ace042d0aa..1f041271f7fe 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2363,7 +2363,7 @@ static char *type_strings[] = { "FarSync TE1" }; -static void +static int fst_init_card(struct fst_card_info *card) { int i; @@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card) * we'll have to revise it in some way then. */ for (i = 0; i < card->nports; i++) { - err = register_hdlc_device(card->ports[i].dev); - if (err < 0) { - int j; + err = register_hdlc_device(card->ports[i].dev); + if (err < 0) { pr_err("Cannot register HDLC device for port %d (errno %d)\n", - i, -err); - for (j = i; j < card->nports; j++) { - free_netdev(card->ports[j].dev); - card->ports[j].dev = NULL; - } - card->nports = i; - break; - } + i, -err); + while (i--) + unregister_hdlc_device(card->ports[i].dev); + return err; + } } pr_info("%s-%s: %s IRQ%d, %d ports\n", port_to_dev(&card->ports[0])->name, port_to_dev(&card->ports[card->nports - 1])->name, type_strings[card->type], card->irq, card->nports); + return 0; } static const struct net_device_ops fst_ops = { @@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Try to enable the device */ if ((err = pci_enable_device(pdev)) != 0) { pr_err("Failed to enable card. Err %d\n", -err); - kfree(card); - return err; + goto enable_fail; } if ((err = pci_request_regions(pdev, "FarSync")) !=0) { pr_err("Failed to allocate regions. 
Err %d\n", -err); - pci_disable_device(pdev); - kfree(card); - return err; + goto regions_fail; } /* Get virtual addresses of memory regions */ @@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) card->phys_ctlmem = pci_resource_start(pdev, 3); if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { pr_err("Physical memory remap failed\n"); - pci_release_regions(pdev); - pci_disable_device(pdev); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto ioremap_physmem_fail; } if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { pr_err("Control memory remap failed\n"); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto ioremap_ctlmem_fail; } dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); /* Register the interrupt handler */ if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { pr_err("Unable to register interrupt %d\n", card->irq); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENODEV; + goto irq_fail; } /* Record info we need */ @@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) while (i--) free_netdev(card->ports[i].dev); pr_err("FarSync: out of memory\n"); - free_irq(card->irq, card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENODEV; + err = -ENOMEM; + goto hdlcdev_fail; } card->ports[i].dev = dev; card->ports[i].card = card; @@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, card); /* Remainder of card setup */ + if (no_of_cards_added >= FST_MAX_CARDS) { + pr_err("FarSync: too many cards\n"); + err = -ENOMEM; + goto card_array_fail; + } fst_card_array[no_of_cards_added] = card; card->card_no = no_of_cards_added++; /* Record instance and bump it */ - fst_init_card(card); + err = fst_init_card(card); + if (err) + goto init_card_fail; if (card->family == FST_FAMILY_TXU) { /* * Allocate a dma buffer for transmit and receives @@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) &card->rx_dma_handle_card); if (card->rx_dma_handle_host == NULL) { pr_err("Could not allocate rx dma buffer\n"); - fst_disable_intr(card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENOMEM; + err = -ENOMEM; + goto rx_dma_fail; } card->tx_dma_handle_host = pci_alloc_consistent(card->device, FST_MAX_MTU, &card->tx_dma_handle_card); if (card->tx_dma_handle_host == NULL) { pr_err("Could not allocate tx dma buffer\n"); - fst_disable_intr(card); - pci_release_regions(pdev); - pci_disable_device(pdev); - iounmap(card->ctlmem); - iounmap(card->mem); - kfree(card); - return -ENOMEM; + err = -ENOMEM; + goto tx_dma_fail; } } return 0; /* Success */ + +tx_dma_fail: + pci_free_consistent(card->device, FST_MAX_MTU, + card->rx_dma_handle_host, + card->rx_dma_handle_card); +rx_dma_fail: + fst_disable_intr(card); + for (i = 0 ; i < card->nports ; i++) + unregister_hdlc_device(card->ports[i].dev); +init_card_fail: + fst_card_array[card->card_no] = NULL; +card_array_fail: + for (i = 0 ; i < card->nports ; i++) + free_netdev(card->ports[i].dev); +hdlcdev_fail: + free_irq(card->irq, card); +irq_fail: + iounmap(card->ctlmem); +ioremap_ctlmem_fail: + 
iounmap(card->mem); +ioremap_physmem_fail: + pci_release_regions(pdev); +regions_fail: + pci_disable_device(pdev); +enable_fail: + kfree(card); + return err; } /* diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 5895f1978691..fa9fdfa128c1 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu) { struct x25_asy *sl = netdev_priv(dev); unsigned char *xbuff, *rbuff; - int len = 2 * newmtu; + int len; + if (newmtu > 65534) + return -EINVAL; + + len = 2 * newmtu; xbuff = kmalloc(len + 4, GFP_ATOMIC); rbuff = kmalloc(len + 4, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 82017f56e661..e6c56c5bb0f6 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar) if (status) goto err_htc_stop; - ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) + ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; + else + ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; + INIT_LIST_HEAD(&ar->arvifs); if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 6c102b1312ff..eebc860c3655 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, int msdu_len, msdu_chaining = 0; struct sk_buff *msdu; struct htt_rx_desc *rx_desc; - bool corrupted = false; lockdep_assert_held(&htt->rx_ring.lock); @@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & RX_MSDU_END_INFO0_LAST_MSDU; - if (msdu_chaining && !last_msdu) - corrupted = true; - if (last_msdu) { msdu->next = NULL; break; @@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_chaining = -1; /* - * Apparently FW sometimes reports weird chained MSDU sequences with - * more than one rx descriptor. This seems like a bug but needs more - * analyzing. For the time being fix it by dropping such sequences to - * avoid blowing up the host system. - */ - if (corrupted) { - ath10k_warn("failed to pop chained msdus, dropping\n"); - ath10k_htt_rx_free_msdu_chain(*head_msdu); - *head_msdu = NULL; - *tail_msdu = NULL; - msdu_chaining = -EINVAL; - } - - /* * Don't refill the ring yet. 
* * First, the elements popped here are still in use - it is not diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index 6db51a666f61..d06fcb05adf2 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->bus_priv.usb = bus_pub; dev_set_drvdata(dev, bus); bus->ops = &brcmf_usb_bus_ops; - bus->chip = bus_pub->devid; - bus->chiprev = bus_pub->chiprev; bus->proto_type = BRCMF_PROTO_BCDC; bus->always_use_fws_queue = true; @@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) if (ret) goto fail; } + bus->chip = bus_pub->devid; + bus->chiprev = bus_pub->chiprev; + /* request firmware here */ brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, brcmf_usb_probe_phase2); diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index ed50de6362ed..6dc5dd3ced44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* recalculate basic rates */ iwl_calc_basic_rates(priv, ctx); - /* - * force CTS-to-self frames protection if RTS-CTS is not preferred - * one aggregation protection method - */ - if (!priv->hw_params.use_rts_for_aggregation) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); if (vif->type == NL80211_IFTYPE_AP || diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 0aa7c0085c9f..b1a33322b9ba 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -88,6 +88,7 @@ * P2P client interfaces simultaneously if they are in different bindings. * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and * P2P client interfaces simultaneously if they are in same bindings. + * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. 
* @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 8b5302777632..725ba49576bf 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - if (vif->bss_conf.use_cts_prot) { + if (vif->bss_conf.use_cts_prot) cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); - } + IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot, vif->bss_conf.ht_operation_mode); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7215f5980186..9bfb90680cdc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -303,6 +303,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; } + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT && + !iwlwifi_mod_params.uapsd_disable) { + hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; + hw->uapsd_queues = IWL_UAPSD_AC_INFO; + hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + } + hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); @@ -1159,8 +1166,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, bcast_mac = &cmd->macs[mvmvif->id]; - /* enable filtering only for associated stations */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) + /* + * enable filtering only for associated stations, but not for P2P + * Clients + */ + if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || + !vif->bss_conf.assoc) return; bcast_mac->default_discard = 1; @@ -1237,10 +1248,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm, if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) return 0; - /* bcast filtering isn't supported for P2P client */ - if (vif->p2p) - return 0; - if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) return 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 4b6c7d4bd199..eac2b424f6a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm, struct iwl_scan_offload_cmd *scan, struct iwl_mvm_scan_params *params) { - scan->channel_count = - mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + - mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; + scan->channel_count = req->n_channels; scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; @@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req, struct iwl_scan_channel_cfg *channels, enum ieee80211_band band, - int *head, int *tail, + int *head, u32 ssid_bitmap, struct iwl_mvm_scan_params *params) { - struct ieee80211_supported_band *s_band; - int n_channels = req->n_channels; - int i, j, index = 0; - bool partial; + int i, index = 0; - /* - * We have to configure all supported channels, even if we don't want to - * scan on them, but we have to send channels 
in the order that we want - * to scan. So add requested channels to head of the list and others to - * the end. - */ - s_band = &mvm->nvm_data->bands[band]; - - for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { - partial = false; - for (j = 0; j < n_channels; j++) - if (s_band->channels[i].center_freq == - req->channels[j]->center_freq) { - index = *head; - (*head)++; - /* - * Channels that came with the request will be - * in partial scan . - */ - partial = true; - break; - } - if (!partial) { - index = *tail; - (*tail)--; - } - channels->channel_number[index] = - cpu_to_le16(ieee80211_frequency_to_channel( - s_band->channels[i].center_freq)); + for (i = 0; i < req->n_channels; i++) { + struct ieee80211_channel *chan = req->channels[i]; + + if (chan->band != band) + continue; + + index = *head; + (*head)++; + + channels->channel_number[index] = cpu_to_le16(chan->hw_value); channels->dwell_time[index][0] = params->dwell[band].active; channels->dwell_time[index][1] = params->dwell[band].passive; channels->iter_count[index] = cpu_to_le16(1); channels->iter_interval[index] = 0; - if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR)) + if (!(chan->flags & IEEE80211_CHAN_NO_IR)) channels->type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); channels->type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL); - if (partial) - channels->type[index] |= - cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL); + cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL | + IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL); - if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40) + if (chan->flags & IEEE80211_CHAN_NO_HT40) channels->type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); @@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; int head = 0; - int tail = band_2ghz + band_5ghz - 1; u32 ssid_bitmap; int cmd_len; int ret; @@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, &scan_cfg->scan_cmd.tx_cmd[0], scan_cfg->data); iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, - IEEE80211_BAND_2GHZ, &head, &tail, + IEEE80211_BAND_2GHZ, &head, ssid_bitmap, ¶ms); } if (band_5ghz) { @@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, scan_cfg->data + SCAN_OFFLOAD_PROBE_REQ_SIZE); iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, - IEEE80211_BAND_5GHZ, &head, &tail, + IEEE80211_BAND_5GHZ, &head, ssid_bitmap, ¶ms); } diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 7091a18d5a72..98950e45c7b0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, @@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 
0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 5b32106182f8..fe0f66f73507 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); + memset(tx_info_aggr, 0, sizeof(*tx_info_aggr)); tx_info_aggr->bss_type = tx_info_src->bss_type; tx_info_aggr->bss_num = tx_info_src->bss_num; diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e95dec91a561..b511613bba2d 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = pkt_len; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 8dee6c86f4f1..c161141f6c39 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) if (skb) { rx_info = MWIFIEX_SKB_RXCB(skb); + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index cbabc12fbda3..e91cd0fa5ca8 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = skb->len; diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index 5fce7e78a36e..70eb863c7249 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) return -1; tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index e73034fbbde9..0e88364e0c67 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; @@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer, skb->priority = MWIFIEX_PRIO_VI; tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 37f26afd4314..fd7e5b9b4581 100644 --- 
a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, return -1; } + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 9a56bc61cb1d..b0601b91cc4f 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index e11dab2216c6..832006b5aab1 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -231,9 +231,12 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer) */ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) { - __le32 reg; + __le32 *reg; u32 fw_mode; + reg = kmalloc(sizeof(*reg), GFP_KERNEL); + if (reg == NULL) + return -ENOMEM; /* cannot use rt2x00usb_register_read here as it uses different * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the * magic value USB_MODE_AUTORUN (0x11) to the device, thus the @@ -241,8 +244,9 @@ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) */ rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, - ®, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE); - fw_mode = le32_to_cpu(reg); + reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE); + fw_mode = le32_to_cpu(*reg); + kfree(reg); if ((fw_mode & 0x00000003) == 2) return 1; @@ -261,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, int status; u32 offset; u32 length; + int retval; /* * Check which section of the firmware we need. @@ -278,7 +283,10 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, /* * Write firmware to device. 
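 *
 * [Editor's aside, not part of the patch: note that rt2800usb_autorun_detect()
 * above also switched from a stack variable to a kmalloc()ed buffer for the
 * vendor request. USB transfer buffers are handed to the host controller for
 * DMA, and DMA to or from on-stack memory is not allowed, so such buffers
 * must come from the heap. The callers below were then taught to distinguish
 * a negative return (allocation or transfer failure) from the boolean
 * detection result.]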
*/ - if (rt2800usb_autorun_detect(rt2x00dev)) { + retval = rt2800usb_autorun_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) { rt2x00_info(rt2x00dev, "Firmware loading not required - NIC in AutoRun mode\n"); } else { @@ -763,7 +771,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, */ static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev) { - if (rt2800usb_autorun_detect(rt2x00dev)) + int retval; + + retval = rt2800usb_autorun_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) return 1; return rt2800_efuse_detect(rt2x00dev); } @@ -772,7 +785,10 @@ static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; - if (rt2800usb_efuse_detect(rt2x00dev)) + retval = rt2800usb_efuse_detect(rt2x00dev); + if (retval < 0) + return retval; + if (retval) retval = rt2800_read_eeprom_efuse(rt2x00dev); else retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1844a47636b6..c65b636bcab9 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, { struct gnttab_map_grant_ref *gop_map = *gopp_map; u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + /* This always points to the shinfo of the skb being checked, which + * could be either the first or the one on the frag_list + */ struct skb_shared_info *shinfo = skb_shinfo(skb); + /* If this is non-NULL, we are currently checking the frag_list skb, and + * this points to the shinfo of the first one + */ + struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; + const bool sharedslot = nr_frags && + frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; int i, err; - struct sk_buff *first_skb = NULL; /* Check status of header. */ err = (*gopp_copy)->status; - (*gopp_copy)++; if (unlikely(err)) { if (net_ratelimit()) netdev_dbg(queue->vif->dev, @@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, (*gopp_copy)->status, pending_idx, (*gopp_copy)->source.u.ref); - xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); + /* The first frag might still have this slot mapped */ + if (!sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); } + (*gopp_copy)++; check_frags: for (i = 0; i < nr_frags; i++, gop_map++) { @@ -1062,8 +1073,19 @@ check_frags: pending_idx, gop_map->handle); /* Had a previous error? Invalidate this fragment. */ - if (unlikely(err)) + if (unlikely(err)) { xenvif_idx_unmap(queue, pending_idx); + /* If the mapping of the first frag was OK, but + * the header's copy failed, and they are + * sharing a slot, send an error + */ + if (i == 0 && sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + else + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } continue; } @@ -1075,42 +1097,53 @@ check_frags: gop_map->status, pending_idx, gop_map->ref); + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) continue; - /* First error: invalidate preceding fragments. */ + + /* First error: if the header hasn't shared a slot with the + * first frag, release it as well. + */ + if (!sharedslot) + xenvif_idx_release(queue, + XENVIF_TX_CB(skb)->pending_idx, + XEN_NETIF_RSP_OKAY); + + /* Invalidate preceding fragments of this skb. 
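 *
 * [Editor's aside, not part of the patch: the invariant this rework enforces
 * is that every pending slot gets exactly one response - xenvif_idx_release()
 * - while grant-mapped frags are additionally unmapped with
 * xenvif_idx_unmap(). When the header copy and the first frag share a slot
 * (the sharedslot flag above), the response for that slot is deferred until
 * both users have been dealt with, so neither path releases it twice.]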
*/ for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } + + /* And if we found the error while checking the frag_list, unmap + * the first skb's frags + */ + if (first_shinfo) { + for (j = 0; j < first_shinfo->nr_frags; j++) { + pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } } /* Remember the error: invalidate all subsequent fragments. */ err = newerr; } - if (skb_has_frag_list(skb)) { - first_skb = skb; - skb = shinfo->frag_list; - shinfo = skb_shinfo(skb); + if (skb_has_frag_list(skb) && !first_shinfo) { + first_shinfo = skb_shinfo(skb); + shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); nr_frags = shinfo->nr_frags; goto check_frags; } - /* There was a mapping error in the frag_list skb. We have to unmap - * the first skb's frags - */ - if (first_skb && err) { - int j; - shinfo = skb_shinfo(first_skb); - for (j = 0; j < shinfo->nr_frags; j++) { - pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xenvif_idx_unmap(queue, pending_idx); - } - } - *gopp_map = gop_map; return err; } @@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) /* Check the remap error code. */ if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { + /* If there was an error, xenvif_tx_check_gop is + * expected to release all the frags which were mapped, + * so kfree_skb shouldn't do it again + */ skb_shinfo(skb)->nr_frags = 0; + if (skb_has_frag_list(skb)) { + struct sk_buff *nskb = + skb_shinfo(skb)->frag_list; + skb_shinfo(nskb)->nr_frags = 0; + } kfree_skb(skb); continue; } @@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) tx_unmap_op.status); BUG(); } - - xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY); } static inline int rx_work_todo(struct xenvif_queue *queue) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 2ccb4a02368b..055222bae6e4 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1439,16 +1439,11 @@ static void xennet_disconnect_backend(struct netfront_info *info) unsigned int i = 0; unsigned int num_queues = info->netdev->real_num_tx_queues; + netif_carrier_off(info->netdev); + for (i = 0; i < num_queues; ++i) { struct netfront_queue *queue = &info->queues[i]; - /* Stop old i/f to prevent errors whilst we rebuild the state. */ - spin_lock_bh(&queue->rx_lock); - spin_lock_irq(&queue->tx_lock); - netif_carrier_off(queue->info->netdev); - spin_unlock_irq(&queue->tx_lock); - spin_unlock_bh(&queue->rx_lock); - if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) unbind_from_irqhandler(queue->tx_irq, queue); if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { @@ -1458,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) queue->tx_evtchn = queue->rx_evtchn = 0; queue->tx_irq = queue->rx_irq = 0; + napi_synchronize(&queue->napi); + /* End access and free the pages */ xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_end_access(queue->rx_ring_ref, queue->rx.sring); @@ -2046,13 +2043,15 @@ static int xennet_connect(struct net_device *dev) /* By now, the queue structures have been set up */ for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; - spin_lock_bh(&queue->rx_lock); - spin_lock_irq(&queue->tx_lock); /* Step 1: Discard all pending TX packet fragments. 
*/ + spin_lock_irq(&queue->tx_lock); xennet_release_tx_bufs(queue); + spin_unlock_irq(&queue->tx_lock); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ + spin_lock_bh(&queue->rx_lock); + for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { skb_frag_t *frag; const struct page *page; @@ -2076,6 +2075,8 @@ static int xennet_connect(struct net_device *dev) } queue->rx.req_prod_pvt = requeue_idx; + + spin_unlock_bh(&queue->rx_lock); } /* @@ -2087,13 +2088,17 @@ static int xennet_connect(struct net_device *dev) netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; + notify_remote_via_irq(queue->tx_irq); if (queue->tx_irq != queue->rx_irq) notify_remote_via_irq(queue->rx_irq); - xennet_tx_buf_gc(queue); - xennet_alloc_rx_buffers(queue); + spin_lock_irq(&queue->tx_lock); + xennet_tx_buf_gc(queue); spin_unlock_irq(&queue->tx_lock); + + spin_lock_bh(&queue->rx_lock); + xennet_alloc_rx_buffers(queue); spin_unlock_bh(&queue->rx_lock); } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a3bf2122a8d5..401b2453da45 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) } EXPORT_SYMBOL(of_mdiobus_register); -/** - * of_mdiobus_link_phydev - Find a device node for a phy - * @mdio: pointer to mii_bus structure - * @phydev: phydev for which the of_node pointer should be set - * - * Walk the list of subnodes of a mdio bus and look for a node that matches the - * phy's address with its 'reg' property. If found, set the of_node pointer for - * the phy. This allows auto-probed pyh devices to be supplied with information - * passed in via DT. - */ -void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) -{ - struct device *dev = &phydev->dev; - struct device_node *child; - - if (dev->of_node || !mdio->dev.of_node) - return; - - for_each_available_child_of_node(mdio->dev.of_node, child) { - int addr; - - addr = of_mdio_parse_addr(&mdio->dev, child); - if (addr < 0) - continue; - - if (addr == phydev->addr) { - dev->of_node = child; - return; - } - } -} -EXPORT_SYMBOL(of_mdiobus_link_phydev); - /* Helper function for of_phy_find_device */ static int of_phy_match(struct device *dev, void *phy_np) { diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index 2872ece81f35..44333bd8f908 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig @@ -5,6 +5,12 @@ # Parport configuration. # +config ARCH_MIGHT_HAVE_PC_PARPORT + bool + help + Select this config option from the architecture Kconfig if + the architecture might have PC parallel port hardware. + menuconfig PARPORT tristate "Parallel port support" depends on HAS_IOMEM @@ -31,12 +37,6 @@ menuconfig PARPORT If unsure, say Y. -config ARCH_MIGHT_HAVE_PC_PARPORT - bool - help - Select this config option from the architecture Kconfig if - the architecture might have PC parallel port hardware. 
- if PARPORT config PARPORT_PC diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 1bd6363bc95e..9f43916637ca 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc) status = readl(info->irqmux_base); - for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) + for_each_set_bit(n, &status, info->nbanks) __gpio_irq_handler(&info->banks[n]); chained_irq_exit(chip, desc); diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 15b3459f8656..220acb4cbee5 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data) } else raw3270_writesf_readpart(rp); memset(&rp->init_reset, 0, sizeof(rp->init_reset)); - memset(&rp->init_data, 0, sizeof(rp->init_data)); } static int diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 69ef4f8cfac8..4038437ff033 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev) int rc; ap_dev->drv = ap_drv; + + spin_lock_bh(&ap_device_list_lock); + list_add(&ap_dev->list, &ap_device_list); + spin_unlock_bh(&ap_device_list_lock); + rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; - if (!rc) { + if (rc) { spin_lock_bh(&ap_device_list_lock); - list_add(&ap_dev->list, &ap_device_list); + list_del_init(&ap_dev->list); spin_unlock_bh(&ap_device_list_lock); } return rc; diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig index 78b0fba7047e..8afc6fee40c5 100644 --- a/drivers/staging/media/omap4iss/Kconfig +++ b/drivers/staging/media/omap4iss/Kconfig @@ -1,6 +1,6 @@ config VIDEO_OMAP4 bool "OMAP 4 Camera support" - depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 + depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 select VIDEOBUF2_DMA_CONTIG ---help--- Driver for an OMAP 4 ISS controller. diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 9d2b673f90e3..b8125aa64ad8 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep, if (hwep->type == USB_ENDPOINT_XFER_CONTROL) cap |= QH_IOS; - if (hwep->num) - cap |= QH_ZLT; + + cap |= QH_ZLT; cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; /* * For ISO-TX, we set mult at QH as the largest value, and use diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 21b99b4b4082..0e950ad8cb25 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1) if (!hub_is_superspeed(hub->hdev)) return -EINVAL; + ret = hub_port_status(hub, port1, &portstatus, &portchange); + if (ret < 0) + return ret; + + /* + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI + * Controller [1022:7814] will have spurious result making the following + * usb 3.0 device hotplugging route to the 2.0 root hub and recognized + * as high-speed device if we set the usb 3.0 port link state to + * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we + * check the state here to avoid the bug. 
+ */ + if ((portstatus & USB_PORT_STAT_LINK_STATE) == + USB_SS_PORT_LS_RX_DETECT) { + dev_dbg(&hub->ports[port1 - 1]->dev, + "Not disabling port; link state is RxDetect\n"); + return ret; + } + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); if (ret) return ret; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b7a506f2bb14..5c660c77f03b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) * p2m are consistent. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { - unsigned long p; - struct page *scratch_page = get_balloon_scratch_page(); - if (!PageHighMem(page)) { + struct page *scratch_page = get_balloon_scratch_page(); + ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte(page_to_pfn(scratch_page), PAGE_KERNEL_RO), 0); BUG_ON(ret); - } - p = page_to_pfn(scratch_page); - __set_phys_to_machine(pfn, pfn_to_mfn(p)); - put_balloon_scratch_page(); + put_balloon_scratch_page(); + } + __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } #endif diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index c3667b202f2f..5f1e1f3cd186 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -88,7 +88,6 @@ static int xen_suspend(void *data) if (!si->cancelled) { xen_irq_resume(); - xen_console_resume(); xen_timer_resume(); } @@ -135,6 +134,10 @@ static void do_suspend(void) err = stop_machine(xen_suspend, &si, cpumask_of(0)); + /* Resume console as early as possible. */ + if (!si.cancelled) + xen_console_resume(); + raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c @@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm) static void put_reqs_available(struct kioctx *ctx, unsigned nr) { struct kioctx_cpu *kcpu; + unsigned long flags; preempt_disable(); kcpu = this_cpu_ptr(ctx->cpu); + local_irq_save(flags); kcpu->reqs_available += nr; + while (kcpu->reqs_available >= ctx->req_batch * 2) { kcpu->reqs_available -= ctx->req_batch; atomic_add(ctx->req_batch, &ctx->reqs_available); } + local_irq_restore(flags); preempt_enable(); } @@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx) { struct kioctx_cpu *kcpu; bool ret = false; + unsigned long flags; preempt_disable(); kcpu = this_cpu_ptr(ctx->cpu); + local_irq_save(flags); if (!kcpu->reqs_available) { int old, avail = atomic_read(&ctx->reqs_available); @@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx) ret = true; kcpu->reqs_available--; out: + local_irq_restore(flags); preempt_enable(); return ret; } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e12441c7cf1d..7187b14faa6c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid) log_list); list_del_init(&ordered->log_list); spin_unlock_irq(&log->log_extents_lock[index]); + + if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) && + !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) { + struct inode *inode = ordered->inode; + u64 start = ordered->file_offset; + u64 end = ordered->file_offset + ordered->len - 1; + + WARN_ON(!inode); + filemap_fdatawrite_range(inode->i_mapping, start, end); + } wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)); + btrfs_put_ordered_extent(ordered); spin_lock_irq(&log->log_extents_lock[index]); } diff --git a/fs/btrfs/volumes.c 
b/fs/btrfs/volumes.c index 6104676857f5..6cb82f62cb7c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; - if (device->bdev) + if (device->bdev) { device->fs_devices->open_devices--; - - /* remove sysfs entry */ - btrfs_kobj_rm_device(root->fs_info, device); + /* remove sysfs entry */ + btrfs_kobj_rm_device(root->fs_info, device); + } call_rcu(&device->rcu, free_device); diff --git a/fs/coredump.c b/fs/coredump.c index 0b2528fb640e..a93f7e6ea4cf 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm, if (unlikely(nr < 0)) return nr; - tsk->flags = PF_DUMPCORE; + tsk->flags |= PF_DUMPCORE; if (atomic_read(&mm->mm_users) == nr + 1) goto done; /* diff --git a/fs/direct-io.c b/fs/direct-io.c index 98040ba388ac..194d0d122cae 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) * L1 cache. */ static inline struct page *dio_get_page(struct dio *dio, - struct dio_submit *sdio, size_t *from, size_t *to) + struct dio_submit *sdio) { - int n; if (dio_pages_present(sdio) == 0) { int ret; @@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio, return ERR_PTR(ret); BUG_ON(dio_pages_present(sdio) == 0); } - n = sdio->head++; - *from = n ? 0 : sdio->from; - *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE; - return dio->pages[n]; + return dio->pages[sdio->head]; } /** @@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, while (sdio->block_in_file < sdio->final_block_in_request) { struct page *page; size_t from, to; - page = dio_get_page(dio, sdio, &from, &to); + + page = dio_get_page(dio, sdio); if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } + from = sdio->head ? 0 : sdio->from; + to = (sdio->head == sdio->tail - 1) ? 
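/* [Editor's aside, not part of the patch: with the from/to computation moved
 * out of dio_get_page(), the caller derives each page's valid byte range
 * itself - the first page of the request may start at a nonzero offset
 * (sdio->from), the last page may end early (sdio->to), and every page in
 * between covers the full [0, PAGE_SIZE) range.] */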
sdio->to : PAGE_SIZE; + sdio->head++; while (from < to) { unsigned this_chunk_bytes; /* # of bytes mapped */ diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 098f97bdcf1b..ca887314aba9 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -643,9 +643,8 @@ struct fuse_copy_state { unsigned long seglen; unsigned long addr; struct page *pg; - void *mapaddr; - void *buf; unsigned len; + unsigned offset; unsigned move_pages:1; }; @@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs) if (cs->currbuf) { struct pipe_buffer *buf = cs->currbuf; - if (!cs->write) { - kunmap_atomic(cs->mapaddr); - } else { - kunmap_atomic(cs->mapaddr); + if (cs->write) buf->len = PAGE_SIZE - cs->len; - } cs->currbuf = NULL; - cs->mapaddr = NULL; - } else if (cs->mapaddr) { - kunmap_atomic(cs->mapaddr); + } else if (cs->pg) { if (cs->write) { flush_dcache_page(cs->pg); set_page_dirty_lock(cs->pg); } put_page(cs->pg); - cs->mapaddr = NULL; } + cs->pg = NULL; } /* @@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs) */ static int fuse_copy_fill(struct fuse_copy_state *cs) { - unsigned long offset; + struct page *page; int err; unlock_request(cs->fc, cs->req); @@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) BUG_ON(!cs->nr_segs); cs->currbuf = buf; - cs->mapaddr = kmap_atomic(buf->page); + cs->pg = buf->page; + cs->offset = buf->offset; cs->len = buf->len; - cs->buf = cs->mapaddr + buf->offset; cs->pipebufs++; cs->nr_segs--; } else { - struct page *page; - if (cs->nr_segs == cs->pipe->buffers) return -EIO; @@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) buf->len = 0; cs->currbuf = buf; - cs->mapaddr = kmap_atomic(page); - cs->buf = cs->mapaddr; + cs->pg = page; + cs->offset = 0; cs->len = PAGE_SIZE; cs->pipebufs++; cs->nr_segs++; @@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) cs->iov++; cs->nr_segs--; } - err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); + err = get_user_pages_fast(cs->addr, 1, cs->write, &page); if (err < 0) return err; BUG_ON(err != 1); - offset = cs->addr % PAGE_SIZE; - cs->mapaddr = kmap_atomic(cs->pg); - cs->buf = cs->mapaddr + offset; - cs->len = min(PAGE_SIZE - offset, cs->seglen); + cs->pg = page; + cs->offset = cs->addr % PAGE_SIZE; + cs->len = min(PAGE_SIZE - cs->offset, cs->seglen); cs->seglen -= cs->len; cs->addr += cs->len; } @@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) { unsigned ncpy = min(*size, cs->len); if (val) { + void *pgaddr = kmap_atomic(cs->pg); + void *buf = pgaddr + cs->offset; + if (cs->write) - memcpy(cs->buf, *val, ncpy); + memcpy(buf, *val, ncpy); else - memcpy(*val, cs->buf, ncpy); + memcpy(*val, buf, ncpy); + + kunmap_atomic(pgaddr); *val += ncpy; } *size -= ncpy; cs->len -= ncpy; - cs->buf += ncpy; + cs->offset += ncpy; return ncpy; } @@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) out_fallback_unlock: unlock_page(newpage); out_fallback: - cs->mapaddr = kmap_atomic(buf->page); - cs->buf = cs->mapaddr + buf->offset; + cs->pg = buf->page; + cs->offset = buf->offset; err = lock_request(cs->fc, cs->req); if (err) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 42198359fa1b..0c6048247a34 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) inode = ACCESS_ONCE(entry->d_inode); if (inode && is_bad_inode(inode)) goto invalid; - else if 
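/* [Editor's aside, not part of the patch: time_before64() is used here
 * rather than a plain "<" so the comparison remains correct across
 * wraparound of the 64-bit jiffies value. The jiffies.h helpers are
 * built on signed subtraction, roughly:
 *
 *	#define time_after64(a,b)	((__s64)((b) - (a)) < 0)
 *	#define time_before64(a,b)	time_after64(b,a)
 *
 * so a stale dentry timeout is detected even if get_jiffies_64() has
 * wrapped past the stored value.] */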
(fuse_dentry_time(entry) < get_jiffies_64()) { + else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || + (flags & LOOKUP_REVAL)) { int err; struct fuse_entry_out outarg; struct fuse_req *req; @@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent, return err; } -static int fuse_rename(struct inode *olddir, struct dentry *oldent, - struct inode *newdir, struct dentry *newent) -{ - return fuse_rename_common(olddir, oldent, newdir, newent, 0, - FUSE_RENAME, sizeof(struct fuse_rename_in)); -} - static int fuse_rename2(struct inode *olddir, struct dentry *oldent, struct inode *newdir, struct dentry *newent, unsigned int flags) @@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent, if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) return -EINVAL; - if (fc->no_rename2 || fc->minor < 23) - return -EINVAL; + if (flags) { + if (fc->no_rename2 || fc->minor < 23) + return -EINVAL; - err = fuse_rename_common(olddir, oldent, newdir, newent, flags, - FUSE_RENAME2, sizeof(struct fuse_rename2_in)); - if (err == -ENOSYS) { - fc->no_rename2 = 1; - err = -EINVAL; + err = fuse_rename_common(olddir, oldent, newdir, newent, flags, + FUSE_RENAME2, + sizeof(struct fuse_rename2_in)); + if (err == -ENOSYS) { + fc->no_rename2 = 1; + err = -EINVAL; + } + } else { + err = fuse_rename_common(olddir, oldent, newdir, newent, 0, + FUSE_RENAME, + sizeof(struct fuse_rename_in)); } + return err; +} +static int fuse_rename(struct inode *olddir, struct dentry *oldent, + struct inode *newdir, struct dentry *newent) +{ + return fuse_rename2(olddir, oldent, newdir, newent, 0); } static int fuse_link(struct dentry *entry, struct inode *newdir, @@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, int err; bool r; - if (fi->i_time < get_jiffies_64()) { + if (time_before64(fi->i_time, get_jiffies_64())) { r = true; err = fuse_do_getattr(inode, stat, file); } else { @@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask) ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { struct fuse_inode *fi = get_fuse_inode(inode); - if (fi->i_time < get_jiffies_64()) { + if (time_before64(fi->i_time, get_jiffies_64())) { refreshed = true; err = fuse_perm_getattr(inode, mask); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6e16dad13e9b..40ac2628ddcf 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page) error = -EIO; req->ff = fuse_write_file_get(fc, fi); if (!req->ff) - goto err_free; + goto err_nofile; fuse_write_fill(req, req->ff, page_offset(page), 0); @@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page) return 0; +err_nofile: + __free_page(tmp_page); err_free: fuse_request_free(req); err: @@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping, data.ff = NULL; err = -ENOMEM; - data.orig_pages = kzalloc(sizeof(struct page *) * - FUSE_MAX_PAGES_PER_REQ, + data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, + sizeof(struct page *), GFP_NOFS); if (!data.orig_pages) goto out; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 754dcf23de8a..03246cd9d47a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -478,6 +478,17 @@ static const match_table_t tokens = { {OPT_ERR, NULL} }; +static int fuse_match_uint(substring_t *s, unsigned int *res) +{ + int err = -ENOMEM; + char *buf = match_strdup(s); + if (buf) { + err = kstrtouint(buf, 10, res); + kfree(buf); + } + return err; +} + static int 
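/* [Editor's aside, not part of the patch: the motivation for
 * fuse_match_uint() above is that match_int() parses a signed int, so
 * user/group IDs above INT_MAX - e.g. 4294967294, commonly used for
 * "nobody" - could not be passed as mount options. Parsing the token
 * with kstrtouint() accepts the full unsigned 32-bit range before it is
 * converted with make_kuid()/make_kgid() and validated.] */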
parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) { char *p; @@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) while ((p = strsep(&opt, ",")) != NULL) { int token; int value; + unsigned uv; substring_t args[MAX_OPT_ARGS]; if (!*p) continue; @@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) break; case OPT_USER_ID: - if (match_int(&args[0], &value)) + if (fuse_match_uint(&args[0], &uv)) return 0; - d->user_id = make_kuid(current_user_ns(), value); + d->user_id = make_kuid(current_user_ns(), uv); if (!uid_valid(d->user_id)) return 0; d->user_id_present = 1; break; case OPT_GROUP_ID: - if (match_int(&args[0], &value)) + if (fuse_match_uint(&args[0], &uv)) return 0; - d->group_id = make_kgid(current_user_ns(), value); + d->group_id = make_kgid(current_user_ns(), uv); if (!gid_valid(d->group_id)) return 0; d->group_id_present = 1; @@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->writeback_cache = 1; if (arg->time_gran && arg->time_gran <= 1000000000) fc->sb->s_time_gran = arg->time_gran; - else - fc->sb->s_time_gran = 1000000000; - } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; @@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | - FUSE_WRITEBACK_CACHE; + FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); @@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); - if (!parse_fuse_opt((char *) data, &d, is_bdev)) + if (!parse_fuse_opt(data, &d, is_bdev)) goto err; if (is_bdev) { diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 4fc3a3046174..26b3f952e6b1 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) int error = 0; state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; - flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; + flags = (IS_SETLKW(cmd) ? 
0 : LM_FLAG_TRY) | GL_EXACT; mutex_lock(&fp->f_fl_mutex); @@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) goto out; flock_lock_file_wait(file, &(struct file_lock){.fl_type = F_UNLCK}); - gfs2_glock_dq_wait(fl_gh); + gfs2_glock_dq(fl_gh); gfs2_holder_reinit(state, flags, fl_gh); } else { error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 770e16716d81..7f513b1ceb2c 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, cachep = gfs2_glock_aspace_cachep; else cachep = gfs2_glock_cachep; - gl = kmem_cache_alloc(cachep, GFP_KERNEL); + gl = kmem_cache_alloc(cachep, GFP_NOFS); if (!gl) return -ENOMEM; memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); if (glops->go_flags & GLOF_LVB) { - gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); + gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS); if (!gl->gl_lksb.sb_lvbptr) { kmem_cache_free(cachep, gl); return -ENOMEM; @@ -1383,12 +1383,16 @@ __acquires(&lru_lock) gl = list_entry(list->next, struct gfs2_glock, gl_lru); list_del_init(&gl->gl_lru); if (!spin_trylock(&gl->gl_spin)) { +add_back_to_lru: list_add(&gl->gl_lru, &lru_list); atomic_inc(&lru_count); continue; } + if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { + spin_unlock(&gl->gl_spin); + goto add_back_to_lru; + } clear_bit(GLF_LRU, &gl->gl_flags); - spin_unlock(&lru_lock); gl->gl_lockref.count++; if (demote_ok(gl)) handle_callback(gl, LM_ST_UNLOCKED, 0, false); @@ -1396,7 +1400,7 @@ __acquires(&lru_lock) if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) gl->gl_lockref.count--; spin_unlock(&gl->gl_spin); - spin_lock(&lru_lock); + cond_resched_lock(&lru_lock); } } @@ -1421,7 +1425,7 @@ static long gfs2_scan_glock_lru(int nr) gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); /* Test for being demotable */ - if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { + if (!test_bit(GLF_LOCK, &gl->gl_flags)) { list_move(&gl->gl_lru, &dispose); atomic_dec(&lru_count); freed++; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index fc1100781bbc..2ffc67dce87f 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl) * inode_go_inval - prepare a inode glock to be released * @gl: the glock * @flags: - * - * Normally we invlidate everything, but if we are moving into + * + * Normally we invalidate everything, but if we are moving into * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we * can keep hold of the metadata, since it won't have changed. 
* diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 992ca5b1e045..641383a9c1bb 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -1030,8 +1030,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, new_size = old_size + RECOVER_SIZE_INC; - submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); - result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); + submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS); + result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS); if (!submit || !result) { kfree(submit); kfree(result); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index db629d1bd1bd..f4cb9c0d6bbd 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le /** * gfs2_free_extlen - Return extent length of free blocks - * @rbm: Starting position + * @rrbm: Starting position * @len: Max length to check * * Starting at the block specified by the rbm, see how many free blocks @@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state) /** * gfs2_rlist_free - free a resource group list - * @list: the list of resource groups + * @rlist: the list of resource groups * */ diff --git a/fs/namei.c b/fs/namei.c index 985c6f368485..9eb787e5c167 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2256,9 +2256,10 @@ done: goto out; } path->dentry = dentry; - path->mnt = mntget(nd->path.mnt); + path->mnt = nd->path.mnt; if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) return 1; + mntget(path->mnt); follow_mount(path); error = 0; out: diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 8f98138cbc43..f11b9eed0de1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) spin_unlock(&dreq->lock); while (!list_empty(&hdr->pages)) { - bool do_destroy = true; req = nfs_list_entry(hdr->pages.next); nfs_list_remove_request(req); @@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) case NFS_IOHDR_NEED_COMMIT: kref_get(&req->wb_kref); nfs_mark_request_commit(req, hdr->lseg, &cinfo); - do_destroy = false; } nfs_unlock_and_release_request(req); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e0193d63630c..617f36611d4a 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *); int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, const struct rpc_call_ops *, int, int); +void nfs_free_request(struct nfs_page *req); static inline void nfs_iocounter_init(struct nfs_io_counter *c) { diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 871d6eda8dba..8f854dde4150 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = { &posix_acl_default_xattr_handler, NULL, }; + +static int +nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data, + size_t size, ssize_t *result) +{ + struct posix_acl *acl; + char *p = data + *result; + + acl = get_acl(inode, type); + if (!acl) + return 0; + + posix_acl_release(acl); + + *result += strlen(name); + *result += 1; + if (!size) + return 0; + if (*result > size) + return -ERANGE; + + strcpy(p, name); + return 0; +} + +ssize_t +nfs3_listxattr(struct dentry *dentry, char *data, size_t size) +{ + struct inode *inode = dentry->d_inode; + ssize_t result = 
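/* [Editor's aside, not part of the patch: nfs3_listxattr() follows the
 * usual listxattr(2) contract, which nfs3_list_one_acl() above
 * implements per ACL type: when size == 0 only the space needed for the
 * name (plus its NUL) is accumulated in *result, and when a buffer is
 * supplied the name is copied, with -ERANGE returned if *result would
 * exceed size.] */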
0; + int error; + + error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS, + POSIX_ACL_XATTR_ACCESS, data, size, &result); + if (error) + return error; + + error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT, + POSIX_ACL_XATTR_DEFAULT, data, size, &result); + if (error) + return error; + return result; +} diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index e7daa42bbc86..f0afa291fd58 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = { .getattr = nfs_getattr, .setattr = nfs_setattr, #ifdef CONFIG_NFS_V3_ACL - .listxattr = generic_listxattr, + .listxattr = nfs3_listxattr, .getxattr = generic_getxattr, .setxattr = generic_setxattr, .removexattr = generic_removexattr, @@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = { .getattr = nfs_getattr, .setattr = nfs_setattr, #ifdef CONFIG_NFS_V3_ACL - .listxattr = generic_listxattr, + .listxattr = nfs3_listxattr, .getxattr = generic_getxattr, .setxattr = generic_setxattr, .removexattr = generic_removexattr, diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 745a612dbe22..0be5050638f7 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -29,8 +29,6 @@ static struct kmem_cache *nfs_page_cachep; static const struct rpc_call_ops nfs_pgio_common_ops; -static void nfs_free_request(struct nfs_page *); - static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) { p->npages = pagecount; @@ -232,20 +230,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev) WARN_ON_ONCE(prev == req); if (!prev) { + /* a head request */ req->wb_head = req; req->wb_this_page = req; } else { + /* a subrequest */ WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); req->wb_head = prev->wb_head; req->wb_this_page = prev->wb_this_page; prev->wb_this_page = req; + /* All subrequests take a ref on the head request until + * nfs_page_group_destroy is called */ + kref_get(&req->wb_head->wb_kref); + /* grab extra ref if head request has extra ref from * the write/commit path to handle handoff between write * and commit lists */ - if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) + if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) { + set_bit(PG_INODE_REF, &req->wb_flags); kref_get(&req->wb_kref); + } } } @@ -262,6 +268,10 @@ nfs_page_group_destroy(struct kref *kref) struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); struct nfs_page *tmp, *next; + /* subrequests must release the ref on the head request */ + if (req->wb_head != req) + nfs_release_request(req->wb_head); + if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) return; @@ -387,7 +397,7 @@ static void nfs_clear_request(struct nfs_page *req) * * Note: Should never be called with the spinlock held! 
*/ -static void nfs_free_request(struct nfs_page *req) +void nfs_free_request(struct nfs_page *req) { WARN_ON_ONCE(req->wb_this_page != req); @@ -917,7 +927,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_doio(desc); if (desc->pg_error < 0) return 0; - desc->pg_moreio = 0; if (desc->pg_recoalesce) return 0; /* retry add_request for this subreq */ @@ -964,6 +973,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) desc->pg_count = 0; desc->pg_base = 0; desc->pg_recoalesce = 0; + desc->pg_moreio = 0; while (!list_empty(&head)) { struct nfs_page *req; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f05f321f9d3d..962c9ee758be 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops; static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; static const struct nfs_commit_completion_ops nfs_commit_completion_ops; static const struct nfs_rw_ops nfs_rw_write_ops; +static void nfs_clear_request_commit(struct nfs_page *req); static struct kmem_cache *nfs_wdata_cachep; static mempool_t *nfs_wdata_mempool; @@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); } +/* + * nfs_page_find_head_request_locked - find head request associated with @page + * + * must be called while holding the inode lock. + * + * returns matching head request with reference held, or NULL if not found. + */ static struct nfs_page * -nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) +nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page) { struct nfs_page *req = NULL; @@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) /* Linearly search the commit list for the correct req */ list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { if (freq->wb_page == page) { - req = freq; + req = freq->wb_head; break; } } } - if (req) + if (req) { + WARN_ON_ONCE(req->wb_head != req); + kref_get(&req->wb_kref); + } return req; } -static struct nfs_page *nfs_page_find_request(struct page *page) +/* + * nfs_page_find_head_request - find head request associated with @page + * + * returns matching head request with reference held, or NULL if not found. 
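 *
 * [Editor's aside, not part of the patch: a page "group" here is a ring
 * of nfs_page structures linked through wb_this_page, with each member's
 * wb_head pointing at the head request (head -> sub1 -> ... -> head).
 * With this series every subrequest also holds a reference on its head,
 * released in nfs_page_group_destroy(), which is why lookups now return
 * only head requests.]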
+ */ +static struct nfs_page *nfs_page_find_head_request(struct page *page) { struct inode *inode = page_file_mapping(page)->host; struct nfs_page *req = NULL; spin_lock(&inode->i_lock); - req = nfs_page_find_request_locked(NFS_I(inode), page); + req = nfs_page_find_head_request_locked(NFS_I(inode), page); spin_unlock(&inode->i_lock); return req; } @@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req) clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); } -static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) + +/* nfs_page_group_clear_bits + * @req - an nfs request + * clears all page group related bits from @req + */ +static void +nfs_page_group_clear_bits(struct nfs_page *req) +{ + clear_bit(PG_TEARDOWN, &req->wb_flags); + clear_bit(PG_UNLOCKPAGE, &req->wb_flags); + clear_bit(PG_UPTODATE, &req->wb_flags); + clear_bit(PG_WB_END, &req->wb_flags); + clear_bit(PG_REMOVE, &req->wb_flags); +} + + +/* + * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req + * + * this is a helper function for nfs_lock_and_join_requests + * + * @inode - inode associated with request page group, must be holding inode lock + * @head - head request of page group, must be holding head lock + * @req - request that couldn't lock and needs to wait on the req bit lock + * @nonblock - if true, don't actually wait + * + * NOTE: this must be called holding page_group bit lock and inode spin lock + * and BOTH will be released before returning. + * + * returns 0 on success, < 0 on error. + */ +static int +nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head, + struct nfs_page *req, bool nonblock) + __releases(&inode->i_lock) +{ + struct nfs_page *tmp; + int ret; + + /* relinquish all the locks successfully grabbed this run */ + for (tmp = head ; tmp != req; tmp = tmp->wb_this_page) + nfs_unlock_request(tmp); + + WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); + + /* grab a ref on the request that will be waited on */ + kref_get(&req->wb_kref); + + nfs_page_group_unlock(head); + spin_unlock(&inode->i_lock); + + /* release ref from nfs_page_find_head_request_locked */ + nfs_release_request(head); + + if (!nonblock) + ret = nfs_wait_on_request(req); + else + ret = -EAGAIN; + nfs_release_request(req); + + return ret; +} + +/* + * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests + * + * @destroy_list - request list (using wb_this_page) terminated by @old_head + * @old_head - the old head of the list + * + * All subrequests must be locked and removed from all lists, so at this point + * they are only "active" in this function, and possibly in nfs_wait_on_request + * with a reference held by some other context. + */ +static void +nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, + struct nfs_page *old_head) +{ + while (destroy_list) { + struct nfs_page *subreq = destroy_list; + + destroy_list = (subreq->wb_this_page == old_head) ? + NULL : subreq->wb_this_page; + + WARN_ON_ONCE(old_head != subreq->wb_head); + + /* make sure old group is not used */ + subreq->wb_head = subreq; + subreq->wb_this_page = subreq; + + nfs_clear_request_commit(subreq); + + /* subreq is now totally disconnected from page group or any + * write / commit lists. 
last chance to wake any waiters */ + nfs_unlock_request(subreq); + + if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) { + /* release ref on old head request */ + nfs_release_request(old_head); + + nfs_page_group_clear_bits(subreq); + + /* release the PG_INODE_REF reference */ + if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) + nfs_release_request(subreq); + else + WARN_ON_ONCE(1); + } else { + WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags)); + /* zombie requests have already released the last + * reference and were waiting on the rest of the + * group to complete. Since it's no longer part of a + * group, simply free the request */ + nfs_page_group_clear_bits(subreq); + nfs_free_request(subreq); + } + } +} + +/* + * nfs_lock_and_join_requests - join all subreqs to the head req and return + * a locked reference, cancelling any pending + * operations for this page. + * + * @page - the page used to look up the "page group" of nfs_page structures + * @nonblock - if true, don't block waiting for request locks + * + * This function joins all subrequests to the head request by first + * locking all requests in the group, cancelling any pending operations + * and finally updating the head request to cover the whole range covered by + * the (former) group. All subrequests are removed from any write or commit + * lists, unlinked from the group and destroyed. + * + * Returns a locked, referenced pointer to the head request - which after + * this call is guaranteed to be the only request associated with the page. + * Returns NULL if no requests are found for @page, or an ERR_PTR if an + * error was encountered. + */ +static struct nfs_page * +nfs_lock_and_join_requests(struct page *page, bool nonblock) { struct inode *inode = page_file_mapping(page)->host; - struct nfs_page *req; + struct nfs_page *head, *subreq; + struct nfs_page *destroy_list = NULL; + unsigned int total_bytes; int ret; +try_again: + total_bytes = 0; + + WARN_ON_ONCE(destroy_list); + spin_lock(&inode->i_lock); - for (;;) { - req = nfs_page_find_request_locked(NFS_I(inode), page); - if (req == NULL) - break; - if (nfs_lock_request(req)) - break; - /* Note: If we hold the page lock, as is the case in nfs_writepage, - * then the call to nfs_lock_request() will always - * succeed provided that someone hasn't already marked the - * request as dirty (in which case we don't care). - */ + + /* + * A reference is taken only on the head request which acts as a + * reference to the whole page group - the group will not be destroyed + * until the head reference is released. + */ + head = nfs_page_find_head_request_locked(NFS_I(inode), page); + + if (!head) { spin_unlock(&inode->i_lock); - if (!nonblock) - ret = nfs_wait_on_request(req); - else - ret = -EAGAIN; - nfs_release_request(req); - if (ret != 0) + return NULL; + } + + /* lock each request in the page group */ + nfs_page_group_lock(head); + subreq = head; + do { + /* + * Subrequests are always contiguous, non-overlapping + * and in order. If not, it's a programming error.
+ */ + WARN_ON_ONCE(subreq->wb_offset != + (head->wb_offset + total_bytes)); + + /* keep track of how many bytes this group covers */ + total_bytes += subreq->wb_bytes; + + if (!nfs_lock_request(subreq)) { + /* releases page group bit lock and + * inode spin lock and all references */ + ret = nfs_unroll_locks_and_wait(inode, head, + subreq, nonblock); + + if (ret == 0) + goto try_again; + return ERR_PTR(ret); - spin_lock(&inode->i_lock); + } + + subreq = subreq->wb_this_page; + } while (subreq != head); + + /* Now that all requests are locked, make sure they aren't on any list. + * Commit list removal accounting is done after locks are dropped */ + subreq = head; + do { + nfs_list_remove_request(subreq); + subreq = subreq->wb_this_page; + } while (subreq != head); + + /* unlink subrequests from head, destroy them later */ + if (head->wb_this_page != head) { + /* destroy list will be terminated by head */ + destroy_list = head->wb_this_page; + head->wb_this_page = head; + + /* change head request to cover whole range that + * the former page group covered */ + head->wb_bytes = total_bytes; } + + /* + * prepare head request to be added to new pgio descriptor + */ + nfs_page_group_clear_bits(head); + + /* + * some part of the group was still on the inode list - otherwise + * the group wouldn't be involved in async write. + * grab a reference for the head request, iff it needs one. + */ + if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags)) + kref_get(&head->wb_kref); + + nfs_page_group_unlock(head); + + /* drop lock to clear_request_commit the head req and clean up + * requests on destroy list */ spin_unlock(&inode->i_lock); - return req; + + nfs_destroy_unlinked_subrequests(destroy_list, head); + + /* clean up commit list state */ + nfs_clear_request_commit(head); + + /* still holds ref on head from nfs_page_find_head_request_locked + * and still has lock on head from lock loop */ return head; } /* @@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, struct nfs_page *req; int ret = 0; - req = nfs_find_and_lock_request(page, nonblock); + req = nfs_lock_and_join_requests(page, nonblock); if (!req) goto out; ret = PTR_ERR(req); @@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) set_page_private(req->wb_page, (unsigned long)req); } nfsi->npages++; - set_bit(PG_INODE_REF, &req->wb_flags); + /* this is a head request for a page group - mark it as having an + * extra reference so sub groups can follow suit */ + WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); kref_get(&req->wb_kref); spin_unlock(&inode->i_lock); } @@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req) nfsi->npages--; spin_unlock(&inode->i_lock); } - nfs_release_request(req); + + if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) + nfs_release_request(req); } static void @@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) { struct nfs_commit_info cinfo; unsigned long bytes = 0; - bool do_destroy; if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) goto out; @@ -668,7 +897,6 @@ remove_req: next: nfs_unlock_request(req); nfs_end_page_writeback(req); - do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags); nfs_release_request(req); } out: @@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, spin_lock(&inode->i_lock); for (;;) { - req = nfs_page_find_request_locked(NFS_I(inode), page); + req = nfs_page_find_head_request_locked(NFS_I(inode), page); if (req ==
NULL) goto out_unlock; @@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page) * dropped page. */ do { - req = nfs_page_find_request(page); + req = nfs_page_find_head_request(page); if (req == NULL) return 0; l_ctx = req->wb_lock_context; @@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) struct nfs_page *req; int ret = 0; - for (;;) { - wait_on_page_writeback(page); - req = nfs_page_find_request(page); - if (req == NULL) - break; - if (nfs_lock_request(req)) { - nfs_clear_request_commit(req); - nfs_inode_remove_request(req); - /* - * In case nfs_inode_remove_request has marked the - * page as being dirty - */ - cancel_dirty_page(page, PAGE_CACHE_SIZE); - nfs_unlock_and_release_request(req); - break; - } - ret = nfs_wait_on_request(req); - nfs_release_request(req); - if (ret < 0) - break; + wait_on_page_writeback(page); + + /* blocking call to cancel all requests and join to a single (head) + * request */ + req = nfs_lock_and_join_requests(page, false); + + if (IS_ERR(req)) { + ret = PTR_ERR(req); + } else if (req) { + /* all requests from this page have been cancelled by + * nfs_lock_and_join_requests, so just remove the head + * request from the inode / page_private pointer and + * release it */ + nfs_inode_remove_request(req); + /* + * In case nfs_inode_remove_request has marked the + * page as being dirty + */ + cancel_dirty_page(page, PAGE_CACHE_SIZE); + nfs_unlock_and_release_request(req); } + return ret; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b56b1cc02718..944275c8f56d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2879,6 +2879,7 @@ again: * return the conflicting open: */ if (conf->len) { + kfree(conf->data); conf->len = 0; conf->data = NULL; goto again; @@ -2891,6 +2892,7 @@ again: if (conf->len) { p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); p = xdr_encode_opaque(p, conf->data, conf->len); + kfree(conf->data); } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ p = xdr_encode_hyper(p, (u64)0); /* clientid */ *p++ = cpu_to_be32(0); /* length of owner name */ @@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); else if (nfserr == nfserr_denied) nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); - kfree(lock->lk_denied.ld_owner.data); + return nfserr; } diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 9cd5f63715c0..7f30bdc57d13 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) struct dquot *dquot; unsigned long freed = 0; + spin_lock(&dq_list_lock); head = free_dquots.prev; while (head != &free_dquots && sc->nr_to_scan) { dquot = list_entry(head, struct dquot, dq_free); @@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) freed++; head = free_dquots.prev; } + spin_unlock(&dq_list_lock); return freed; } diff --git a/fs/xattr.c b/fs/xattr.c index 3377dff18404..c69e6d43a0d2 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size) /* wrap around? 
*/ len = sizeof(*new_xattr) + size; - if (len <= sizeof(*new_xattr)) + if (len < sizeof(*new_xattr)) return NULL; new_xattr = kmalloc(len, GFP_KERNEL); diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 96175df211b1..75c3fe5f3d9d 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4298,8 +4298,8 @@ xfs_bmapi_delay( } -int -__xfs_bmapi_allocate( +static int +xfs_bmapi_allocate( struct xfs_bmalloca *bma) { struct xfs_mount *mp = bma->ip->i_mount; @@ -4578,9 +4578,6 @@ xfs_bmapi_write( bma.flist = flist; bma.firstblock = firstblock; - if (flags & XFS_BMAPI_STACK_SWITCH) - bma.stack_switch = 1; - while (bno < end && n < *nmap) { inhole = eof || bma.got.br_startoff > bno; wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 38ba36e9b2f0..b879ca56a64c 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -77,7 +77,6 @@ typedef struct xfs_bmap_free * from written to unwritten, otherwise convert from unwritten to written. */ #define XFS_BMAPI_CONVERT 0x040 -#define XFS_BMAPI_STACK_SWITCH 0x080 #define XFS_BMAPI_FLAGS \ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ @@ -86,8 +85,7 @@ typedef struct xfs_bmap_free { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ { XFS_BMAPI_CONTIG, "CONTIG" }, \ - { XFS_BMAPI_CONVERT, "CONVERT" }, \ - { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" } + { XFS_BMAPI_CONVERT, "CONVERT" } static inline int xfs_bmapi_aflag(int w) diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 703b3ec1796c..64731ef3324d 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -249,59 +249,6 @@ xfs_bmap_rtalloc( } /* - * Stack switching interfaces for allocation - */ -static void -xfs_bmapi_allocate_worker( - struct work_struct *work) -{ - struct xfs_bmalloca *args = container_of(work, - struct xfs_bmalloca, work); - unsigned long pflags; - unsigned long new_pflags = PF_FSTRANS; - - /* - * we are in a transaction context here, but may also be doing work - * in kswapd context, and hence we may need to inherit that state - * temporarily to ensure that we don't block waiting for memory reclaim - * in any way. - */ - if (args->kswapd) - new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; - - current_set_flags_nested(&pflags, new_pflags); - - args->result = __xfs_bmapi_allocate(args); - complete(args->done); - - current_restore_flags_nested(&pflags, new_pflags); -} - -/* - * Some allocation requests often come in with little stack to work on. Push - * them off to a worker thread so there is lots of stack to use. Otherwise just - * call directly to avoid the context switch overhead here. - */ -int -xfs_bmapi_allocate( - struct xfs_bmalloca *args) -{ - DECLARE_COMPLETION_ONSTACK(done); - - if (!args->stack_switch) - return __xfs_bmapi_allocate(args); - - - args->done = &done; - args->kswapd = current_is_kswapd(); - INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker); - queue_work(xfs_alloc_wq, &args->work); - wait_for_completion(&done); - destroy_work_on_stack(&args->work); - return args->result; -} - -/* * Check if the endoff is outside the last extent. If so the caller will grow * the allocation to a stripe unit boundary. All offsets are considered outside * the end of file for an empty fork, so 1 is returned in *eof in that case. 
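
The stack-switching helper removed from fs/xfs/xfs_bmap_util.c above reappears for btree splits in fs/xfs/xfs_btree.c below; the underlying idiom is worth keeping in mind when reading both hunks: bounce the call onto a workqueue worker, which runs on its own fresh stack, and wait for it with an on-stack completion. The following is a minimal sketch of that idiom, not the XFS code itself; do_deep_work() is a hypothetical callee, and the generic system_wq stands in for xfs_alloc_wq.

#include <linux/workqueue.h>
#include <linux/completion.h>

/* hypothetical deep-stack callee, not part of the patch */
static int do_deep_work(int input);

struct deep_stack_args {
	int			input;
	int			result;
	struct completion	*done;
	struct work_struct	work;
};

static void deep_stack_worker(struct work_struct *work)
{
	struct deep_stack_args *args =
		container_of(work, struct deep_stack_args, work);

	/* runs on the kworker's fresh stack, not the caller's */
	args->result = do_deep_work(args->input);
	complete(args->done);
}

static int run_with_fresh_stack(int input)
{
	struct deep_stack_args args;
	DECLARE_COMPLETION_ONSTACK(done);

	args.input = input;
	args.done = &done;
	INIT_WORK_ONSTACK(&args.work, deep_stack_worker);
	queue_work(system_wq, &args.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}

The PF_FSTRANS/PF_MEMALLOC flag juggling in the real worker exists only because the caller may be in transaction or kswapd context and the worker must inherit that state; the core of the pattern is just the work item plus the completion shown here.
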
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 075f72232a64..2fdb72d2c908 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h @@ -55,8 +55,6 @@ struct xfs_bmalloca { bool userdata;/* set if is user data */ bool aeof; /* allocated space at eof */ bool conv; /* overwriting unwritten extents */ - bool stack_switch; - bool kswapd; /* allocation in kswapd context */ int flags; struct completion *done; struct work_struct work; @@ -66,8 +64,6 @@ struct xfs_bmalloca { int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, int *committed); int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); -int xfs_bmapi_allocate(struct xfs_bmalloca *args); -int __xfs_bmapi_allocate(struct xfs_bmalloca *args); int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, int whichfork, int *eof); int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index bf810c6baf2b..cf893bc1e373 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -33,6 +33,7 @@ #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_cksum.h" +#include "xfs_alloc.h" /* * Cursor allocation zone. @@ -2323,7 +2324,7 @@ error1: * record (to be inserted into parent). */ STATIC int /* error */ -xfs_btree_split( +__xfs_btree_split( struct xfs_btree_cur *cur, int level, union xfs_btree_ptr *ptrp, @@ -2503,6 +2504,85 @@ error0: return error; } +struct xfs_btree_split_args { + struct xfs_btree_cur *cur; + int level; + union xfs_btree_ptr *ptrp; + union xfs_btree_key *key; + struct xfs_btree_cur **curp; + int *stat; /* success/failure */ + int result; + bool kswapd; /* allocation in kswapd context */ + struct completion *done; + struct work_struct work; +}; + +/* + * Stack switching interfaces for allocation + */ +static void +xfs_btree_split_worker( + struct work_struct *work) +{ + struct xfs_btree_split_args *args = container_of(work, + struct xfs_btree_split_args, work); + unsigned long pflags; + unsigned long new_pflags = PF_FSTRANS; + + /* + * we are in a transaction context here, but may also be doing work + * in kswapd context, and hence we may need to inherit that state + * temporarily to ensure that we don't block waiting for memory reclaim + * in any way. + */ + if (args->kswapd) + new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; + + current_set_flags_nested(&pflags, new_pflags); + + args->result = __xfs_btree_split(args->cur, args->level, args->ptrp, + args->key, args->curp, args->stat); + complete(args->done); + + current_restore_flags_nested(&pflags, new_pflags); +} + +/* + * BMBT split requests often come in with little stack to work on. Push + * them off to a worker thread so there is lots of stack to use. For the other + * btree types, just call directly to avoid the context switch overhead here. 
+ */ +STATIC int /* error */ +xfs_btree_split( + struct xfs_btree_cur *cur, + int level, + union xfs_btree_ptr *ptrp, + union xfs_btree_key *key, + struct xfs_btree_cur **curp, + int *stat) /* success/failure */ +{ + struct xfs_btree_split_args args; + DECLARE_COMPLETION_ONSTACK(done); + + if (cur->bc_btnum != XFS_BTNUM_BMAP) + return __xfs_btree_split(cur, level, ptrp, key, curp, stat); + + args.cur = cur; + args.level = level; + args.ptrp = ptrp; + args.key = key; + args.curp = curp; + args.stat = stat; + args.done = &done; + args.kswapd = current_is_kswapd(); + INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker); + queue_work(xfs_alloc_wq, &args.work); + wait_for_completion(&done); + destroy_work_on_stack(&args.work); + return args.result; +} + + /* * Copy the old inode root contents into a real block and make the * broot point to it. diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 6c5eb4c551e3..6d3ec2b6ee29 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -749,8 +749,7 @@ xfs_iomap_write_allocate( * pointer that the caller gave to us. */ error = xfs_bmapi_write(tp, ip, map_start_fsb, - count_fsb, - XFS_BMAPI_STACK_SWITCH, + count_fsb, 0, &first_block, 1, imap, &nimaps, &free_list); if (error) diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c index c3453b11f563..7703fa6770ff 100644 --- a/fs/xfs/xfs_sb.c +++ b/fs/xfs/xfs_sb.c @@ -483,10 +483,16 @@ xfs_sb_quota_to_disk( } /* - * GQUOTINO and PQUOTINO cannot be used together in versions - * of superblock that do not have pquotino. from->sb_flags - * tells us which quota is active and should be copied to - * disk. + * GQUOTINO and PQUOTINO cannot be used together in versions of + * superblock that do not have pquotino. from->sb_flags tells us which + * quota is active and should be copied to disk. If neither are active, + * make sure we write NULLFSINO to the sb_gquotino field as a quota + * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature + * bit is set. + * + * Note that we don't need to handle the sb_uquotino or sb_pquotino here + * as they do not require any translation. Hence the main sb field loop + * will write them appropriately from the in-core superblock. */ if ((*fields & XFS_SB_GQUOTINO) && (from->sb_qflags & XFS_GQUOTA_ACCT)) @@ -494,6 +500,17 @@ xfs_sb_quota_to_disk( else if ((*fields & XFS_SB_PQUOTINO) && (from->sb_qflags & XFS_PQUOTA_ACCT)) to->sb_gquotino = cpu_to_be64(from->sb_pquotino); + else { + /* + * We can't rely on just the fields being logged to tell us + * that it is safe to write NULLFSINO - we should only do that + * if quotas are not actually enabled. Hence only write + * NULLFSINO if both in-core quota inodes are NULL. 
+ */ + if (from->sb_gquotino == NULLFSINO && + from->sb_pquotino == NULLFSINO) + to->sb_gquotino = cpu_to_be64(NULLFSINO); + } *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ec4112d257bc..8f8ae95c6e27 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative; *********************************************************************/ /* Special Values of .frequency field */ -#define CPUFREQ_ENTRY_INVALID ~0 -#define CPUFREQ_TABLE_END ~1 +#define CPUFREQ_ENTRY_INVALID ~0u +#define CPUFREQ_TABLE_END ~1u /* Special Values of .flags field */ #define CPUFREQ_BOOST_FREQ (1 << 0) diff --git a/include/linux/libata.h b/include/linux/libata.h index 5ab4e3a76721..92abb497ab14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -593,6 +593,7 @@ struct ata_host { struct device *dev; void __iomem * const *iomap; unsigned int n_ports; + unsigned int n_tags; /* nr of NCQ tags */ void *private_data; struct ata_port_operations *ops; unsigned long flags; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b12f4bbd064c..35b51e7af886 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -578,8 +578,6 @@ struct mlx4_cq { u32 cons_index; u16 irq; - bool irq_affinity_change; - __be32 *set_ci_db; __be32 *arm_db; int arm_sn; @@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, int *vector); void mlx4_release_eq(struct mlx4_dev *dev, int vec); +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); + int mlx4_get_phys_port_id(struct mlx4_dev *dev); int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 11692dea18aa..42aa9b9ecd5f 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -17,6 +17,7 @@ #include <linux/lockdep.h> #include <linux/atomic.h> #include <asm/processor.h> +#include <linux/osq_lock.h> /* * Simple, straightforward mutexes with strict semantics: @@ -46,7 +47,6 @@ * - detects multi-task circular deadlocks and prints out all affected * locks and tasks (and only those tasks) */ -struct optimistic_spin_queue; struct mutex { /* 1: unlocked, 0: locked, negative: locked, possible waiters */ atomic_t count; @@ -56,7 +56,7 @@ struct mutex { struct task_struct *owner; #endif #ifdef CONFIG_MUTEX_SPIN_ON_OWNER - struct optimistic_spin_queue *osq; /* Spinner MCS lock */ + struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif #ifdef CONFIG_DEBUG_MUTEXES const char *name; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index a70c9493d55a..d449018d0726 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev, extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -extern void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev); - #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { @@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np) { return NULL; } - -static inline void of_mdiobus_link_phydev(struct mii_bus *mdio, - struct phy_device *phydev) -{ -} #endif /* CONFIG_OF */ #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) diff --git a/include/linux/osq_lock.h 
b/include/linux/osq_lock.h new file mode 100644 index 000000000000..90230d5811c5 --- /dev/null +++ b/include/linux/osq_lock.h @@ -0,0 +1,27 @@ +#ifndef __LINUX_OSQ_LOCK_H +#define __LINUX_OSQ_LOCK_H + +/* + * An MCS like lock especially tailored for optimistic spinning for sleeping + * lock implementations (mutex, rwsem, etc). + */ + +#define OSQ_UNLOCKED_VAL (0) + +struct optimistic_spin_queue { + /* + * Stores an encoded value of the CPU # of the tail node in the queue. + * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL. + */ + atomic_t tail; +}; + +/* Init macro and function. */ +#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) } + +static inline void osq_lock_init(struct optimistic_spin_queue *lock) +{ + atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); +} + +#endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0a97b583ee8d..e1474ae18c88 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* + * Get the offset in PAGE_SIZE. + * (TODO: hugepage should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + else + return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); +} + +/* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5a75d19aa661..6a94cc8b1ca0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -44,7 +44,6 @@ #include <linux/debugobjects.h> #include <linux/bug.h> #include <linux/compiler.h> -#include <linux/percpu.h> #include <asm/barrier.h> extern int rcu_expedited; /* for sysctl */ @@ -300,41 +299,6 @@ bool __rcu_is_watching(void); #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ /* - * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings. - */ - -#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */ -DECLARE_PER_CPU(int, rcu_cond_resched_count); -void rcu_resched(void); - -/* - * Is it time to report RCU quiescent states? - * - * Note unsynchronized access to rcu_cond_resched_count. Yes, we might - * increment some random CPU's count, and possibly also load the result from - * yet another CPU's count. We might even clobber some other CPU's attempt - * to zero its counter. This is all OK because the goal is not precision, - * but rather reasonable amortization of rcu_note_context_switch() overhead - * and extremely high probability of avoiding RCU CPU stall warnings. - * Note that this function has to be preempted in just the wrong place, - * many thousands of times in a row, for anything bad to happen. - */ -static inline bool rcu_should_resched(void) -{ - return raw_cpu_inc_return(rcu_cond_resched_count) >= - RCU_COND_RESCHED_LIM; -} - -/* - * Report quiscent states to RCU if it is time to do so. - */ -static inline void rcu_cond_resched(void) -{ - if (unlikely(rcu_should_resched())) - rcu_resched(); -} - -/* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ @@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf); * initialization. 
*/ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD +void init_rcu_head(struct rcu_head *head); +void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ +static inline void init_rcu_head(struct rcu_head *head) +{ +} + +static inline void destroy_rcu_head(struct rcu_head *head) +{ +} + static inline void init_rcu_head_on_stack(struct rcu_head *head) { } diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index d5b13bc07a0b..561e8615528d 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h @@ -15,13 +15,13 @@ #ifdef __KERNEL__ /* * the rw-semaphore definition - * - if activity is 0 then there are no active readers or writers - * - if activity is +ve then that is the number of active readers - * - if activity is -1 then there is one active writer + * - if count is 0 then there are no active readers or writers + * - if count is +ve then that is the number of active readers + * - if count is -1 then there is one active writer * - if wait_list is not empty, then there are processes waiting for the semaphore */ struct rw_semaphore { - __s32 activity; + __s32 count; raw_spinlock_t wait_lock; struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 8d79708146aa..035d3c57fc8a 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -13,10 +13,11 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock.h> - #include <linux/atomic.h> +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER +#include <linux/osq_lock.h> +#endif -struct optimistic_spin_queue; struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK @@ -25,15 +26,15 @@ struct rw_semaphore; /* All arch specific implementations share the same struct */ struct rw_semaphore { long count; - raw_spinlock_t wait_lock; struct list_head wait_list; -#ifdef CONFIG_SMP + raw_spinlock_t wait_lock; +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* spinner MCS lock */ /* * Write owner. Used as a speculative check to see * if the owner is running on the cpu. 
*/ struct task_struct *owner; - struct optimistic_spin_queue *osq; /* spinner MCS lock */ #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) # define __RWSEM_DEP_MAP_INIT(lockname) #endif -#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, \ - __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ - LIST_HEAD_INIT((name).wait_list), \ - NULL, /* owner */ \ - NULL /* mcs lock */ \ - __RWSEM_DEP_MAP_INIT(name) } +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER +#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL #else -#define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, \ - __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ - LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } +#define __RWSEM_OPT_INIT(lockname) #endif +#define __RWSEM_INITIALIZER(name) \ + { .count = RWSEM_UNLOCKED_VALUE, \ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ + __RWSEM_OPT_INIT(name) \ + __RWSEM_DEP_MAP_INIT(name) } + #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/include/linux/sched.h b/include/linux/sched.h index 41a195385081..02757d105cdb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -872,21 +872,21 @@ enum cpu_idle_type { #define SD_NUMA 0x4000 /* cross-node balancing */ #ifdef CONFIG_SCHED_SMT -static inline const int cpu_smt_flags(void) +static inline int cpu_smt_flags(void) { return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; } #endif #ifdef CONFIG_SCHED_MC -static inline const int cpu_core_flags(void) +static inline int cpu_core_flags(void) { return SD_SHARE_PKG_RESOURCES; } #endif #ifdef CONFIG_NUMA -static inline const int cpu_numa_flags(void) +static inline int cpu_numa_flags(void) { return SD_NUMA; } @@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); -typedef const int (*sched_domain_flags_f)(void); +typedef int (*sched_domain_flags_f)(void); #define SDTL_OVERLAP 0x01 diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 7277caf3743d..47f425464f84 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -203,7 +203,6 @@ struct neigh_table { void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; - /* HACK. gc_* should follow parms without a gap! 
*/ int gc_interval; int gc_thresh1; int gc_thresh2; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 713b0b88bd5a..c4d86198d3d6 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -6,6 +6,7 @@ #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_tables.h> +#include <linux/u64_stats_sync.h> #include <net/netlink.h> #define NFT_JUMP_STACK_SIZE 16 @@ -528,8 +529,9 @@ enum nft_chain_type { }; struct nft_stats { - u64 bytes; - u64 pkts; + u64 bytes; + u64 pkts; + struct u64_stats_sync syncp; }; #define NFT_HOOK_OPS_MAX 2 diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h index 079030c853d8..e2070960bac0 100644 --- a/include/net/netns/ieee802154_6lowpan.h +++ b/include/net/netns/ieee802154_6lowpan.h @@ -16,7 +16,7 @@ struct netns_sysctl_lowpan { struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl; struct netns_frags frags; - u16 max_dsize; + int max_dsize; }; #endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 26a394cb91a8..eee608b12cc9 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -13,8 +13,8 @@ struct netns_nftables { struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; + unsigned int base_seq; u8 gencursor; - u8 genctr; }; #endif diff --git a/include/net/sock.h b/include/net/sock.h index 173cae485de1..156350745700 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst) static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst) { - spin_lock(&sk->sk_dst_lock); - __sk_dst_set(sk, dst); - spin_unlock(&sk->sk_dst_lock); + struct dst_entry *old_dst; + + sk_tx_queue_clear(sk); + old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); + dst_release(old_dst); } static inline void @@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk) static inline void sk_dst_reset(struct sock *sk) { - spin_lock(&sk->sk_dst_lock); - __sk_dst_reset(sk); - spin_unlock(&sk->sk_dst_lock); + sk_dst_set(sk, NULL); } struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 40b5ca8a1b1f..25084a052a1e 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -101,6 +101,7 @@ * - add FATTR_CTIME * - add ctime and ctimensec to fuse_setattr_in * - add FUSE_RENAME2 request + * - add FUSE_NO_OPEN_SUPPORT flag */ #ifndef _LINUX_FUSE_H @@ -229,6 +230,7 @@ struct fuse_file_lock { * FUSE_READDIRPLUS_AUTO: adaptive readdirplus * FUSE_ASYNC_DIO: asynchronous direct I/O submission * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes + * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -247,6 +249,7 @@ struct fuse_file_lock { #define FUSE_READDIRPLUS_AUTO (1 << 14) #define FUSE_ASYNC_DIO (1 << 15) #define FUSE_WRITEBACK_CACHE (1 << 16) +#define FUSE_NO_OPEN_SUPPORT (1 << 17) /** * CUSE INIT request/reply flags diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index 35536d9c0964..76768ee812b2 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE endif +config ARCH_SUPPORTS_ATOMIC_RMW + bool + config MUTEX_SPIN_ON_OWNER def_bool y - depends on SMP && !DEBUG_MUTEXES + depends on SMP && !DEBUG_MUTEXES && 
ARCH_SUPPORTS_ATOMIC_RMW + +config RWSEM_SPIN_ON_OWNER + def_bool y + depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW config ARCH_USE_QUEUE_RWLOCK bool diff --git a/kernel/events/core.c b/kernel/events/core.c index a33d9a2bcbd7..6b17ac1b0c2a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn, next_parent = rcu_dereference(next_ctx->parent_ctx); /* If neither context have a parent context; they cannot be clones. */ - if (!parent && !next_parent) + if (!parent || !next_parent) goto unlock; if (next_parent == ctx || next_ctx == parent || next_parent == parent) { @@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - perf_remove_from_context(child_event, true); + /* + * Do not destroy the 'original' grouping; because of the context + * switch optimization the original events could've ended up in a + * random child task. + * + * If we were to destroy the original group, all group related + * operations would cease to function properly after this random + * child dies. + * + * Do destroy all inherited groups; we don't care about those, + * and being thorough is better. + */ + perf_remove_from_context(child_event, !!child_event->parent); /* * It can happen that the parent exits first, and has events @@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event, static void perf_event_exit_task_context(struct task_struct *child, int ctxn) { struct perf_event *child_event, *next; - struct perf_event_context *child_ctx; + struct perf_event_context *child_ctx, *parent_ctx; unsigned long flags; if (likely(!child->perf_event_ctxp[ctxn])) { @@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) raw_spin_lock(&child_ctx->lock); task_ctx_sched_out(child_ctx); child->perf_event_ctxp[ctxn] = NULL; + + /* + * In order to avoid freeing child_ctx->parent_ctx->task + * under perf_event_context::lock, grab another reference. + */ + parent_ctx = child_ctx->parent_ctx; + if (parent_ctx) + get_ctx(parent_ctx); + /* * If this context is a clone; unclone it so it can't get * swapped to another process while we're removing all @@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) raw_spin_unlock_irqrestore(&child_ctx->lock, flags); /* + * Now that we no longer hold perf_event_context::lock, drop + * our extra child_ctx->parent_ctx reference. + */ + if (parent_ctx) + put_ctx(parent_ctx); + + /* * Report the task dead after unscheduling the events so that we * won't get any samples after PERF_RECORD_EXIT. We can however still * get a few PERF_RECORD_READ events.
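
The perf_event_exit_task_context() hunk above is an instance of a common refcount idiom: when dropping a reference under a lock could trigger a free (here, the final put of child_ctx->parent_ctx can free parent_ctx->task), take an extra reference before the drop and release it only after the lock is gone. Below is a minimal sketch of that idiom using kref; struct ctx, ctx_free() and ctx_unclone() are hypothetical stand-ins, not the perf code.

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ctx {
	struct kref	ref;
	spinlock_t	lock;
	struct ctx	*parent;	/* counted reference, protected by lock */
};

static void ctx_free(struct kref *ref)
{
	kfree(container_of(ref, struct ctx, ref));
}

static void ctx_unclone(struct ctx *child)
{
	struct ctx *parent;

	spin_lock(&child->lock);
	parent = child->parent;
	if (parent) {
		/* extra pin: the put below must not be the final one */
		kref_get(&parent->ref);
		child->parent = NULL;
		/* drops the child's reference; count stays >= 1 */
		kref_put(&parent->ref, ctx_free);
	}
	spin_unlock(&child->lock);

	/* the final put, and hence ctx_free(), runs outside child->lock */
	if (parent)
		kref_put(&parent->ref, ctx_free);
}

The same shape appears in the patch as get_ctx() before unclone_ctx() and put_ctx() after raw_spin_unlock_irqrestore(), guaranteeing the free path never executes under perf_event_context::lock.
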
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3214289df5a7..734e9a7d280b 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start, { unsigned long *iter; struct kprobe_blacklist_entry *ent; - unsigned long offset = 0, size = 0; + unsigned long entry, offset = 0, size = 0; for (iter = start; iter < end; iter++) { - if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { - pr_err("Failed to find blacklist %p\n", (void *)*iter); + entry = arch_deref_entry_point((void *)*iter); + + if (!kernel_text_address(entry) || + !kallsyms_lookup_size_offset(entry, &size, &offset)) { + pr_err("Failed to find blacklist at %p\n", + (void *)entry); continue; } ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) return -ENOMEM; - ent->start_addr = *iter; - ent->end_addr = *iter + size; + ent->start_addr = entry; + ent->end_addr = entry + size; INIT_LIST_HEAD(&ent->list); list_add_tail(&ent->list, &kprobe_blacklist); } diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c index 838dc9e00669..be9ee1559fca 100644 --- a/kernel/locking/mcs_spinlock.c +++ b/kernel/locking/mcs_spinlock.c @@ -14,21 +14,47 @@ * called from interrupt context and we have preemption disabled while * spinning. */ -static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node); + +/* + * We use the value 0 to represent "no CPU", thus the encoded value + * will be the CPU number incremented by 1. + */ +static inline int encode_cpu(int cpu_nr) +{ + return cpu_nr + 1; +} + +static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val) +{ + int cpu_nr = encoded_cpu_val - 1; + + return per_cpu_ptr(&osq_node, cpu_nr); +} /* * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. * Can return NULL in case we were the last queued and we updated @lock instead. */ -static inline struct optimistic_spin_queue * -osq_wait_next(struct optimistic_spin_queue **lock, - struct optimistic_spin_queue *node, - struct optimistic_spin_queue *prev) +static inline struct optimistic_spin_node * +osq_wait_next(struct optimistic_spin_queue *lock, + struct optimistic_spin_node *node, + struct optimistic_spin_node *prev) { - struct optimistic_spin_queue *next = NULL; + struct optimistic_spin_node *next = NULL; + int curr = encode_cpu(smp_processor_id()); + int old; + + /* + * If there is a prev node in queue, then the 'old' value will be + * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if + * we're currently last in queue, then the queue will then become empty. + */ + old = prev ? prev->cpu : OSQ_UNLOCKED_VAL; for (;;) { - if (*lock == node && cmpxchg(lock, node, prev) == node) { + if (atomic_read(&lock->tail) == curr && + atomic_cmpxchg(&lock->tail, curr, old) == curr) { /* * We were the last queued, we moved @lock back. 
@prev * will now observe @lock and will complete its @@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock, return next; } -bool osq_lock(struct optimistic_spin_queue **lock) +bool osq_lock(struct optimistic_spin_queue *lock) { - struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); - struct optimistic_spin_queue *prev, *next; + struct optimistic_spin_node *node = this_cpu_ptr(&osq_node); + struct optimistic_spin_node *prev, *next; + int curr = encode_cpu(smp_processor_id()); + int old; node->locked = 0; node->next = NULL; + node->cpu = curr; - node->prev = prev = xchg(lock, node); - if (likely(prev == NULL)) + old = atomic_xchg(&lock->tail, curr); + if (old == OSQ_UNLOCKED_VAL) return true; + prev = decode_cpu(old); + node->prev = prev; ACCESS_ONCE(prev->next) = node; /* @@ -149,20 +180,21 @@ unqueue: return false; } -void osq_unlock(struct optimistic_spin_queue **lock) +void osq_unlock(struct optimistic_spin_queue *lock) { - struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); - struct optimistic_spin_queue *next; + struct optimistic_spin_node *node, *next; + int curr = encode_cpu(smp_processor_id()); /* * Fast path for the uncontended case. */ - if (likely(cmpxchg(lock, node, NULL) == node)) + if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr)) return; /* * Second most likely case. */ + node = this_cpu_ptr(&osq_node); next = xchg(&node->next, NULL); if (next) { ACCESS_ONCE(next->locked) = 1; diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index a2dbac4aca6b..74356dc0ce29 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) * mutex_lock()/rwsem_down_{read,write}() etc. 
*/ -struct optimistic_spin_queue { - struct optimistic_spin_queue *next, *prev; +struct optimistic_spin_node { + struct optimistic_spin_node *next, *prev; int locked; /* 1 if lock acquired */ + int cpu; /* encoded CPU # value */ }; -extern bool osq_lock(struct optimistic_spin_queue **lock); -extern void osq_unlock(struct optimistic_spin_queue **lock); +extern bool osq_lock(struct optimistic_spin_queue *lock); +extern void osq_unlock(struct optimistic_spin_queue *lock); #endif /* __LINUX_MCS_SPINLOCK_H */ diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index bc73d33c6760..acca2c1a3c5e 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) INIT_LIST_HEAD(&lock->wait_list); mutex_clear_owner(lock); #ifdef CONFIG_MUTEX_SPIN_ON_OWNER - lock->osq = NULL; + osq_lock_init(&lock->osq); #endif debug_mutex_init(lock, name, key); diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 9be8a9144978..2c93571162cb 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c @@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem) unsigned long flags; if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { - ret = (sem->activity != 0); + ret = (sem->count != 0); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); } return ret; @@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map(&sem->dep_map, name, key, 0); #endif - sem->activity = 0; + sem->count = 0; raw_spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); } @@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) waiter = list_entry(next, struct rwsem_waiter, list); } while (waiter->type != RWSEM_WAITING_FOR_WRITE); - sem->activity += woken; + sem->count += woken; out: return sem; @@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - if (sem->activity >= 0 && list_empty(&sem->wait_list)) { + if (sem->count >= 0 && list_empty(&sem->wait_list)) { /* granted */ - sem->activity++; + sem->count++; raw_spin_unlock_irqrestore(&sem->wait_lock, flags); goto out; } @@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - if (sem->activity >= 0 && list_empty(&sem->wait_list)) { + if (sem->count >= 0 && list_empty(&sem->wait_list)) { /* granted */ - sem->activity++; + sem->count++; ret = 1; } @@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) * itself into sleep and waiting for system woke it or someone * else in the head of the wait list up. 
*/ - if (sem->activity == 0) + if (sem->count == 0) break; set_task_state(tsk, TASK_UNINTERRUPTIBLE); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); @@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) raw_spin_lock_irqsave(&sem->wait_lock, flags); } /* got the lock */ - sem->activity = -1; + sem->count = -1; list_del(&waiter.list); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); @@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - if (sem->activity == 0) { + if (sem->count == 0) { /* got the lock */ - sem->activity = -1; + sem->count = -1; ret = 1; } @@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - if (--sem->activity == 0 && !list_empty(&sem->wait_list)) + if (--sem->count == 0 && !list_empty(&sem->wait_list)) sem = __rwsem_wake_one_writer(sem); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); @@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - sem->activity = 0; + sem->count = 0; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 1); @@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem) raw_spin_lock_irqsave(&sem->wait_lock, flags); - sem->activity = 1; + sem->count = 1; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 0); diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index dacc32142fcc..a2391ac135c8 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, sem->count = RWSEM_UNLOCKED_VALUE; raw_spin_lock_init(&sem->wait_lock); INIT_LIST_HEAD(&sem->wait_list); -#ifdef CONFIG_SMP +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER sem->owner = NULL; - sem->osq = NULL; + osq_lock_init(&sem->osq); #endif } @@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) return false; } -#ifdef CONFIG_SMP +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER /* * Try to acquire write lock before the writer has been put on wait queue. */ @@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) { struct task_struct *owner; - bool on_cpu = true; + bool on_cpu = false; if (need_resched()) - return 0; + return false; rcu_read_lock(); owner = ACCESS_ONCE(sem->owner); @@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) rcu_read_unlock(); /* - * If sem->owner is not set, the rwsem owner may have - * just acquired it and not set the owner yet or the rwsem - * has been released. + * If sem->owner is not set, yet we have just recently entered the + * slowpath, then there is a possibility reader(s) may have the lock. + * To be safe, avoid spinning in these situations. 
*/ return on_cpu; } diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 42f806de49d4..e2d3bc7f03b4 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -12,7 +12,7 @@ #include <linux/atomic.h> -#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER static inline void rwsem_set_owner(struct rw_semaphore *sem) { sem->owner = current; diff --git a/kernel/power/process.c b/kernel/power/process.c index 0ca8d83e2369..4ee194eb524b 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -186,6 +186,7 @@ void thaw_processes(void) printk("Restarting tasks ... "); + __usermodehelper_set_disable_depth(UMH_FREEZING); thaw_workqueues(); read_lock(&tasklist_lock); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4dd8822f732a..ed35a4790afe 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state) error = suspend_ops->begin(state); if (error) goto Close; - } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { + } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) { error = freeze_ops->begin(); if (error) goto Close; @@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state) Close: if (need_suspend_ops(state) && suspend_ops->end) suspend_ops->end(); - else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) + else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) freeze_ops->end(); return error; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f1ba77363fbb..625d0b0cd75a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu) rdp->passed_quiesce = 1; } +static DEFINE_PER_CPU(int, rcu_sched_qs_mask); + +static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { + .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, + .dynticks = ATOMIC_INIT(1), +#ifdef CONFIG_NO_HZ_FULL_SYSIDLE + .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE, + .dynticks_idle = ATOMIC_INIT(1), +#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ +}; + +/* + * Let the RCU core know that this CPU has gone through the scheduler, + * which is a quiescent state. This is called when the need for a + * quiescent state is urgent, so we burn an atomic operation and full + * memory barriers to let the RCU core know about it, regardless of what + * this CPU might (or might not) do in the near future. + * + * We inform the RCU core by emulating a zero-duration dyntick-idle + * period, which we in turn do by incrementing the ->dynticks counter + * by two. + */ +static void rcu_momentary_dyntick_idle(void) +{ + unsigned long flags; + struct rcu_data *rdp; + struct rcu_dynticks *rdtp; + int resched_mask; + struct rcu_state *rsp; + + local_irq_save(flags); + + /* + * Yes, we can lose flag-setting operations. This is OK, because + * the flag will be set again after some delay. + */ + resched_mask = raw_cpu_read(rcu_sched_qs_mask); + raw_cpu_write(rcu_sched_qs_mask, 0); + + /* Find the flavor that needs a quiescent state. */ + for_each_rcu_flavor(rsp) { + rdp = raw_cpu_ptr(rsp->rda); + if (!(resched_mask & rsp->flavor_mask)) + continue; + smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */ + if (ACCESS_ONCE(rdp->mynode->completed) != + ACCESS_ONCE(rdp->cond_resched_completed)) + continue; + + /* + * Pretend to be momentarily idle for the quiescent state. 
+ * This allows the grace-period kthread to record the + * quiescent state, with no need for this CPU to do anything + * further. + */ + rdtp = this_cpu_ptr(&rcu_dynticks); + smp_mb__before_atomic(); /* Earlier stuff before QS. */ + atomic_add(2, &rdtp->dynticks); /* QS. */ + smp_mb__after_atomic(); /* Later stuff after QS. */ + break; + } + local_irq_restore(flags); +} + /* * Note a context switch. This is a quiescent state for RCU-sched, * and requires special handling for preemptible RCU. @@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu) trace_rcu_utilization(TPS("Start context switch")); rcu_sched_qs(cpu); rcu_preempt_note_context_switch(cpu); + if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) + rcu_momentary_dyntick_idle(); trace_rcu_utilization(TPS("End context switch")); } EXPORT_SYMBOL_GPL(rcu_note_context_switch); -static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { - .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, - .dynticks = ATOMIC_INIT(1), -#ifdef CONFIG_NO_HZ_FULL_SYSIDLE - .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE, - .dynticks_idle = ATOMIC_INIT(1), -#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ -}; - static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ static long qhimark = 10000; /* If this many pending, ignore blimit. */ static long qlowmark = 100; /* Once only this many pending, use blimit. */ @@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX; module_param(jiffies_till_first_fqs, ulong, 0644); module_param(jiffies_till_next_fqs, ulong, 0644); +/* + * How long the grace period must be before we start recruiting + * quiescent-state help from rcu_note_context_switch(). + */ +static ulong jiffies_till_sched_qs = HZ / 20; +module_param(jiffies_till_sched_qs, ulong, 0644); + static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp); static void force_qs_rnp(struct rcu_state *rsp, @@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, bool *isidle, unsigned long *maxj) { unsigned int curr; + int *rcrmp; unsigned int snap; curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); @@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, } /* - * There is a possibility that a CPU in adaptive-ticks state - * might run in the kernel with the scheduling-clock tick disabled - * for an extended time period. Invoke rcu_kick_nohz_cpu() to - * force the CPU to restart the scheduling-clock tick in this - * CPU is in this state. - */ - rcu_kick_nohz_cpu(rdp->cpu); - - /* - * Alternatively, the CPU might be running in the kernel - * for an extended period of time without a quiescent state. - * Attempt to force the CPU through the scheduler to gain the - * needed quiescent state, but only if the grace period has gone - * on for an uncommonly long time. If there are many stuck CPUs, - * we will beat on the first one until it gets unstuck, then move - * to the next. Only do this for the primary flavor of RCU. + * A CPU running for an extended time within the kernel can + * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode, + * even context-switching back and forth between a pair of + * in-kernel CPU-bound tasks cannot advance grace periods. + * So if the grace period is old enough, make the CPU pay attention. + * Note that the unsynchronized assignments to the per-CPU + * rcu_sched_qs_mask variable are safe. Yes, setting of + * bits can be lost, but they will be set again on the next + * force-quiescent-state pass. 
So lost bit sets do not result + * in incorrect behavior, merely in a grace period lasting + * a few jiffies longer than it might otherwise. Because + * there are at most four threads involved, and because the + * updates are only once every few jiffies, the probability of + * lossage (and thus of slight grace-period extension) is + * quite low. + * + * Note that if the jiffies_till_sched_qs boot/sysfs parameter + * is set too high, we override with half of the RCU CPU stall + * warning delay. */ - if (rdp->rsp == rcu_state_p && + rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu); + if (ULONG_CMP_GE(jiffies, + rdp->rsp->gp_start + jiffies_till_sched_qs) || ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { - rdp->rsp->jiffies_resched += 5; - resched_cpu(rdp->cpu); + if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) { + ACCESS_ONCE(rdp->cond_resched_completed) = + ACCESS_ONCE(rdp->mynode->completed); + smp_mb(); /* ->cond_resched_completed before *rcrmp. */ + ACCESS_ONCE(*rcrmp) = + ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask; + resched_cpu(rdp->cpu); /* Force CPU into scheduler. */ + rdp->rsp->jiffies_resched += 5; /* Enable beating. */ + } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { + /* Time to beat on that CPU again! */ + resched_cpu(rdp->cpu); /* Force CPU into scheduler. */ + rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */ + } } return 0; @@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ + static u8 fl_mask = 0x1; int cpustride = 1; int i; int j; @@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp, for (i = 1; i < rcu_num_lvls; i++) rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; rcu_init_levelspread(rsp); + rsp->flavor_mask = fl_mask; + fl_mask <<= 1; /* Initialize the elements themselves, starting from the leaves. */ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index bf2c1e669691..0f69a79c5b7d 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -307,6 +307,9 @@ struct rcu_data { /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ unsigned long offline_fqs; /* Kicked due to being offline. */ + unsigned long cond_resched_completed; + /* Grace period that needs help */ + /* from cond_resched(). */ /* 5) __rcu_pending() statistics. */ unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ @@ -392,6 +395,7 @@ struct rcu_state { struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ + u8 flavor_mask; /* bit in flavor mask. */ struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ void (*call)(struct rcu_head *head, /* call_rcu() flavor. 
*/ void (*func)(struct rcu_head *head)); @@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); static void do_nocb_deferred_wakeup(struct rcu_data *rdp); static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); -static void rcu_kick_nohz_cpu(int cpu); +static void __maybe_unused rcu_kick_nohz_cpu(int cpu); static bool init_nocb_callback_list(struct rcu_data *rdp); static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index cbc2c45265e2..02ac0fb186b8 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp) * if an adaptive-ticks CPU is failing to respond to the current grace * period and has not be idle from an RCU perspective, kick it. */ -static void rcu_kick_nohz_cpu(int cpu) +static void __maybe_unused rcu_kick_nohz_cpu(int cpu) { #ifdef CONFIG_NO_HZ_FULL if (tick_nohz_full_cpu(cpu)) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index a2aeb4df0f60..bc7883570530 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf) EXPORT_SYMBOL_GPL(wait_rcu_gp); #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD -static inline void debug_init_rcu_head(struct rcu_head *head) +void init_rcu_head(struct rcu_head *head) { debug_object_init(head, &rcuhead_debug_descr); } -static inline void debug_rcu_head_free(struct rcu_head *head) +void destroy_rcu_head(struct rcu_head *head) { debug_object_free(head, &rcuhead_debug_descr); } @@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void) early_initcall(check_cpu_stall_init); #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ - -/* - * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings. - */ - -DEFINE_PER_CPU(int, rcu_cond_resched_count); - -/* - * Report a set of RCU quiescent states, for use by cond_resched() - * and friends. Out of line due to being called infrequently. - */ -void rcu_resched(void) -{ - preempt_disable(); - __this_cpu_write(rcu_cond_resched_count, 0); - rcu_note_context_switch(smp_processor_id()); - preempt_enable(); -} diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8705125bb9b1..415ab0268b51 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3573,9 +3573,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy, }; /* - * Fixup the legacy SCHED_RESET_ON_FORK hack + * Fixup the legacy SCHED_RESET_ON_FORK hack, except if + * the policy=-1 was passed by sched_setparam(). 
*/ - if (policy & SCHED_RESET_ON_FORK) { + if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) { attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; policy &= ~SCHED_RESET_ON_FORK; attr.sched_policy = policy; @@ -4162,7 +4163,6 @@ static void __cond_resched(void) int __sched _cond_resched(void) { - rcu_cond_resched(); if (should_resched()) { __cond_resched(); return 1; @@ -4181,18 +4181,15 @@ EXPORT_SYMBOL(_cond_resched); */ int __cond_resched_lock(spinlock_t *lock) { - bool need_rcu_resched = rcu_should_resched(); int resched = should_resched(); int ret = 0; lockdep_assert_held(lock); - if (spin_needbreak(lock) || resched || need_rcu_resched) { + if (spin_needbreak(lock) || resched) { spin_unlock(lock); if (resched) __cond_resched(); - else if (unlikely(need_rcu_resched)) - rcu_resched(); else cpu_relax(); ret = 1; @@ -4206,7 +4203,6 @@ int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); - rcu_cond_resched(); /* BH disabled OK, just recording QSes. */ if (should_resched()) { local_bh_enable(); __cond_resched(); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 695f9773bb60..627b3c34b821 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) avg_atom = p->se.sum_exec_runtime; if (nr_switches) - do_div(avg_atom, nr_switches); + avg_atom = div64_ul(avg_atom, nr_switches); else avg_atom = -1LL; diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 88c9c65a430d..fe75444ae7ec 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, struct itimerspec *new_setting, struct itimerspec *old_setting) { + ktime_t exp; + if (!rtcdev) return -ENOTSUPP; + if (flags & ~TIMER_ABSTIME) + return -EINVAL; + if (old_setting) alarm_timer_get(timr, old_setting); @@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, /* start the timer */ timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); - alarm_start(&timr->it.alarm.alarmtimer, - timespec_to_ktime(new_setting->it_value)); + exp = timespec_to_ktime(new_setting->it_value); + /* Convert (if necessary) to absolute time */ + if (flags != TIMER_ABSTIME) { + ktime_t now; + + now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); + exp = ktime_add(now, exp); + } + + alarm_start(&timr->it.alarm.alarmtimer, exp); return 0; } @@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, if (!alarmtimer_get_rtcdev()) return -ENOTSUPP; + if (flags & ~TIMER_ABSTIME) + return -EINVAL; + if (!capable(CAP_WAKE_ALARM)) return -EPERM; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5b372e3ed675..ac9d1dad630b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -265,12 +265,12 @@ static void update_ftrace_function(void) func = ftrace_ops_list_func; } + update_function_graph_func(); + /* If there's no change, then do nothing more here */ if (ftrace_trace_function == func) return; - update_function_graph_func(); - /* * If we are using the list function, it doesn't care * about the function_trace_ops. 
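The alarm_timer_set() change above does two things worth noting: it rejects flag bits it does not understand, and it anchors a relative expiry to the current time of the alarm's clock base. A userspace sketch of the same two steps, with invented demo_* names and CLOCK_REALTIME standing in for the alarm base:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_ABSTIME 0x1        /* stands in for TIMER_ABSTIME */

static int64_t demo_now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int demo_set(int flags, int64_t exp_ns, int64_t *abs_ns)
{
        if (flags & ~DEMO_ABSTIME)      /* unknown bits: reject, as the patch does */
                return -EINVAL;
        if (!(flags & DEMO_ABSTIME))    /* relative expiry: anchor it to "now" */
                exp_ns += demo_now_ns();
        *abs_ns = exp_ns;
        return 0;
}

int main(void)
{
        int64_t abs_ns;
        if (demo_set(0, 1000000000LL, &abs_ns) == 0)    /* 1s from now */
                printf("fires at %lld ns\n", (long long)abs_ns);
        return 0;
}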
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 7c56c3d06943..ff7027199a9a 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct ring_buffer_per_cpu *cpu_buffer; struct rb_irq_work *work; - if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || - (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) - return POLLIN | POLLRDNORM; - if (cpu == RING_BUFFER_ALL_CPUS) work = &buffer->irq_work; else { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f243444a3772..291397e66669 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size) struct print_entry *entry; unsigned long irq_flags; int alloc; + int pc; + + if (!(trace_flags & TRACE_ITER_PRINTK)) + return 0; + + pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; @@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, - irq_flags, preempt_count()); + irq_flags, pc); if (!event) return 0; @@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size) entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); + ftrace_trace_stack(buffer, irq_flags, 4, pc); return size; } @@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str) struct bputs_entry *entry; unsigned long irq_flags; int size = sizeof(struct bputs_entry); + int pc; + + if (!(trace_flags & TRACE_ITER_PRINTK)) + return 0; + + pc = preempt_count(); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; @@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str) local_save_flags(irq_flags); buffer = global_trace.trace_buffer.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, - irq_flags, preempt_count()); + irq_flags, pc); if (!event) return 0; @@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str) entry->str = str; __buffer_unlock_commit(buffer, event); + ftrace_trace_stack(buffer, irq_flags, 4, pc); return 1; } @@ -809,7 +823,7 @@ static struct { { trace_clock_local, "local", 1 }, { trace_clock_global, "global", 1 }, { trace_clock_counter, "counter", 0 }, - { trace_clock_jiffies, "uptime", 1 }, + { trace_clock_jiffies, "uptime", 0 }, { trace_clock, "perf", 1 }, ARCH_TRACE_CLOCKS }; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 26dc348332b7..57b67b1f24d1 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -59,13 +59,14 @@ u64 notrace trace_clock(void) /* * trace_jiffy_clock(): Simply use jiffies as a clock counter. + * Note that this use of jiffies_64 is not completely safe on + * 32-bit systems. But the window is tiny, and the effect if + * we are affected is that we will have an obviously bogus + * timestamp on a trace event - i.e. not life threatening. 
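The caveat in the trace_clock comment below is the classic torn-read hazard for 64-bit values on 32-bit machines: the two halves of jiffies_64 are loaded separately, and an increment that carries between the two loads yields a value that was never stored. An illustrative single-threaded model (little-endian half ordering assumed):

#include <stdint.h>
#include <stdio.h>

union demo_u64 {
        uint64_t whole;
        uint32_t half[2];       /* what a 32-bit CPU actually loads */
};

int main(void)
{
        union demo_u64 t = { .whole = 0x00000000ffffffffULL };

        /* Reader loads the low half... */
        uint32_t lo = t.half[0];
        /* ...writer increments, carrying into the high half... */
        t.whole++;
        /* ...reader loads the high half: the stitched value is bogus,
         * being neither the old value nor the new one. */
        uint32_t hi = t.half[1];
        printf("torn read: %#llx\n",
               (((unsigned long long)hi << 32) | lo));  /* 0x1ffffffff */
        return 0;
}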
*/ u64 notrace trace_clock_jiffies(void) { - u64 jiffy = jiffies - INITIAL_JIFFIES; - - /* Return nsecs */ - return (u64)jiffies_to_usecs(jiffy) * 1000ULL; + return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES); } /* diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f99e0b3bca8c..2de53628689f 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file) list_del(&file->list); remove_subsystem(file->system); + free_event_filter(file->filter); kmem_cache_free(file_cachep, file); } diff --git a/lib/cpumask.c b/lib/cpumask.c index c101230658eb..b6513a9f2892 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) i %= num_online_cpus(); - if (!cpumask_of_node(numa_node)) { + if (numa_node == -1 || !cpumask_of_node(numa_node)) { /* Use all online cpu's for non numa aware system */ cpumask_copy(mask, cpu_online_mask); } else { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2024bbd573d2..9221c02ed9e2 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, } else { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); + entry = huge_ptep_get(src_pte); ptepage = pte_page(entry); get_page(ptepage); page_dup_rmap(ptepage); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c6399e328931..7211a73ba14d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, if (av == NULL) /* Not actually mapped anymore */ return; - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff = page_to_pgoff(page); read_lock(&tasklist_lock); for_each_process (tsk) { struct anon_vma_chain *vmac; @@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, mutex_lock(&mapping->i_mmap_mutex); read_lock(&tasklist_lock); for_each_process(tsk) { - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff = page_to_pgoff(page); struct task_struct *t = task_early_kill(tsk, force_early); if (!t) diff --git a/mm/memory.c b/mm/memory.c index d67fd9fcf1f2..7e8d8205b610 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, * if page by the offset is not ready to be mapped (cold cache or * something). */ - if (vma->vm_ops->map_pages && fault_around_pages() > 1) { + if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) && + fault_around_pages() > 1) { pte = pte_offset_map_lock(mm, pmd, address, &ptl); do_fault_around(vma, address, pte, pgoff, flags); if (!pte_same(*pte, orig_pte)) diff --git a/mm/migrate.c b/mm/migrate.c index 9e0beaa91845..be6dbf995c0c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -988,9 +988,10 @@ out: * it. Otherwise, putback_lru_page() will drop the reference grabbed * during isolation. 
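Several of the mm hunks around here replace open-coded page->index shifts with a single page_to_pgoff() helper that also folds in the huge-page case; centralizing the unit conversion keeps callers from picking the wrong shift. A toy version of that consolidation pattern, with invented demo_* names:

#include <stdio.h>

struct demo_page {
        unsigned long index;    /* index in this page's own granularity */
        unsigned int order;     /* 0 for base pages, >0 for compound pages */
};

/* One place that knows how to convert to base-page offsets. */
static unsigned long demo_page_to_pgoff(const struct demo_page *p)
{
        return p->index << p->order;
}

int main(void)
{
        struct demo_page base = { .index = 7, .order = 0 };
        struct demo_page huge = { .index = 3, .order = 9 };     /* 2 MiB page */

        printf("%lu %lu\n", demo_page_to_pgoff(&base),
               demo_page_to_pgoff(&huge));      /* 7 1536 */
        return 0;
}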
*/ - if (rc != MIGRATEPAGE_SUCCESS && put_new_page) + if (rc != MIGRATEPAGE_SUCCESS && put_new_page) { + ClearPageSwapBacked(newpage); put_new_page(newpage, private); - else + } else putback_lru_page(newpage); if (result) { diff --git a/mm/rmap.c b/mm/rmap.c index b7e94ebbd09e..22a4a7699cdb 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma) static inline unsigned long __vma_address(struct page *page, struct vm_area_struct *vma) { - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - - if (unlikely(is_vm_hugetlb_page(vma))) - pgoff = page->index << huge_page_order(page_hstate(page)); - + pgoff_t pgoff = page_to_pgoff(page); return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); } @@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page, static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff = page_to_pgoff(page); struct anon_vma_chain *avc; int ret = SWAP_AGAIN; @@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) { struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << compound_order(page); + pgoff_t pgoff = page_to_pgoff(page); struct vm_area_struct *vma; int ret = SWAP_AGAIN; diff --git a/mm/shmem.c b/mm/shmem.c index 1140f49b6ded..af68b15a8fc1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt; * a time): we would prefer not to enlarge the shmem inode just for that. */ struct shmem_falloc { - int mode; /* FALLOC_FL mode currently operating */ + wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ pgoff_t start; /* start of range currently being fallocated */ pgoff_t next; /* the next page offset to be fallocated */ pgoff_t nr_falloced; /* how many new pages have been fallocated */ @@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, return; index = start; - for ( ; ; ) { + while (index < end) { cond_resched(); pvec.nr = find_get_entries(mapping, index, min(end - index, (pgoff_t)PAGEVEC_SIZE), pvec.pages, indices); if (!pvec.nr) { - if (index == start || unfalloc) + /* If all gone or hole-punch or unfalloc, we're done */ + if (index == start || end != -1) break; + /* But if truncating, restart to make sure all gone */ index = start; continue; } - if ((index == start || unfalloc) && indices[0] >= end) { - pagevec_remove_exceptionals(&pvec); - pagevec_release(&pvec); - break; - } mem_cgroup_uncharge_start(); for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; @@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (radix_tree_exceptional_entry(page)) { if (unfalloc) continue; - nr_swaps_freed += !shmem_free_swap(mapping, - index, page); + if (shmem_free_swap(mapping, index, page)) { + /* Swap was replaced by page: retry */ + index--; + break; + } + nr_swaps_freed++; continue; } @@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (page->mapping == mapping) { VM_BUG_ON_PAGE(PageWriteback(page), page); truncate_inode_page(mapping, page); + } else { + /* Page was replaced by swap: retry */ + unlock_page(page); + index--; + break; } } unlock_page(page); @@ -760,7 +766,7 @@ static 
int shmem_writepage(struct page *page, struct writeback_control *wbc) spin_lock(&inode->i_lock); shmem_falloc = inode->i_private; if (shmem_falloc && - !shmem_falloc->mode && + !shmem_falloc->waitq && index >= shmem_falloc->start && index < shmem_falloc->next) shmem_falloc->nr_unswapped++; @@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * Trinity finds that probing a hole which tmpfs is punching can * prevent the hole-punch from ever completing: which in turn * locks writers out with its hold on i_mutex. So refrain from - * faulting pages into the hole while it's being punched, and - * wait on i_mutex to be released if vmf->flags permits. + * faulting pages into the hole while it's being punched. Although + * shmem_undo_range() does remove the additions, it may be unable to + * keep up, as each new page needs its own unmap_mapping_range() call, + * and the i_mmap tree grows ever slower to scan if new vmas are added. + * + * It does not matter if we sometimes reach this check just before the + * hole-punch begins, so that one fault then races with the punch: + * we just need to make racing faults a rare case. + * + * The implementation below would be much simpler if we just used a + * standard mutex or completion: but we cannot take i_mutex in fault, + * and bloating every shmem inode for this unlikely case would be sad. */ if (unlikely(inode->i_private)) { struct shmem_falloc *shmem_falloc; spin_lock(&inode->i_lock); shmem_falloc = inode->i_private; - if (!shmem_falloc || - shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE || - vmf->pgoff < shmem_falloc->start || - vmf->pgoff >= shmem_falloc->next) - shmem_falloc = NULL; - spin_unlock(&inode->i_lock); - /* - * i_lock has protected us from taking shmem_falloc seriously - * once return from shmem_fallocate() went back up that stack. - * i_lock does not serialize with i_mutex at all, but it does - * not matter if sometimes we wait unnecessarily, or sometimes - * miss out on waiting: we just need to make those cases rare. - */ - if (shmem_falloc) { + if (shmem_falloc && + shmem_falloc->waitq && + vmf->pgoff >= shmem_falloc->start && + vmf->pgoff < shmem_falloc->next) { + wait_queue_head_t *shmem_falloc_waitq; + DEFINE_WAIT(shmem_fault_wait); + + ret = VM_FAULT_NOPAGE; if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { + /* It's polite to up mmap_sem if we can */ up_read(&vma->vm_mm->mmap_sem); - mutex_lock(&inode->i_mutex); - mutex_unlock(&inode->i_mutex); - return VM_FAULT_RETRY; + ret = VM_FAULT_RETRY; } - /* cond_resched? Leave that to GUP or return to user */ - return VM_FAULT_NOPAGE; + + shmem_falloc_waitq = shmem_falloc->waitq; + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&inode->i_lock); + schedule(); + + /* + * shmem_falloc_waitq points into the shmem_fallocate() + * stack of the hole-punching task: shmem_falloc_waitq + * is usually invalid by the time we reach here, but + * finish_wait() does not dereference it in that case; + * though i_lock needed lest racing with wake_up_all(). 
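The on-stack wait-queue protocol in these shmem hunks relies on one rule: waiters may only touch the queue head under the same lock the hole-puncher holds when it retracts the pointer and wakes everyone, because the storage lives on the puncher's stack. A pthread model of that shape, with invented demo_* names; the condvar plays the wait queue and the mutex plays i_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_ilock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t *demo_waitq;      /* like inode->i_private: NULL when idle */

static void *demo_faulter(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&demo_ilock);
        while (demo_waitq)              /* hole punch in progress: wait it out */
                pthread_cond_wait(demo_waitq, &demo_ilock);
        pthread_mutex_unlock(&demo_ilock);
        puts("fault proceeds");
        return NULL;
}

int main(void)
{
        pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;        /* "on stack" */
        pthread_t t;

        pthread_mutex_lock(&demo_ilock);
        demo_waitq = &waitq;            /* publish under the lock */
        pthread_mutex_unlock(&demo_ilock);

        pthread_create(&t, NULL, demo_faulter, NULL);

        /* ... punch the hole ... then retract and wake under the lock. */
        pthread_mutex_lock(&demo_ilock);
        demo_waitq = NULL;
        pthread_cond_broadcast(&waitq);
        pthread_mutex_unlock(&demo_ilock);

        pthread_join(t, NULL);          /* waitq must outlive all waiters */
        return 0;
}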
+ */ + spin_lock(&inode->i_lock); + finish_wait(shmem_falloc_waitq, &shmem_fault_wait); + spin_unlock(&inode->i_lock); + return ret; } + spin_unlock(&inode->i_lock); } error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); @@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, mutex_lock(&inode->i_mutex); - shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE; - if (mode & FALLOC_FL_PUNCH_HOLE) { struct address_space *mapping = file->f_mapping; loff_t unmap_start = round_up(offset, PAGE_SIZE); loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); + shmem_falloc.waitq = &shmem_falloc_waitq; shmem_falloc.start = unmap_start >> PAGE_SHIFT; shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; spin_lock(&inode->i_lock); @@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, 1 + unmap_end - unmap_start, 0); shmem_truncate_range(inode, offset, offset + len - 1); /* No need to unmap again: hole-punching leaves COWed pages */ + + spin_lock(&inode->i_lock); + inode->i_private = NULL; + wake_up_all(&shmem_falloc_waitq); + spin_unlock(&inode->i_lock); error = 0; - goto undone; + goto out; } /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ @@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, goto out; } + shmem_falloc.waitq = NULL; shmem_falloc.start = start; shmem_falloc.next = start; shmem_falloc.nr_falloced = 0; diff --git a/mm/slab_common.c b/mm/slab_common.c index 735e01a0db6f..d31c4bacc6a2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size) continue; } -#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) +#if !defined(CONFIG_SLUB) if (!strcmp(s->name, name)) { pr_err("%s (%s): Cache name already exists.\n", __func__, name); diff --git a/mm/truncate.c b/mm/truncate.c index 6a78c814bebf..eda247307164 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping, for ( ; ; ) { cond_resched(); if (!pagevec_lookup_entries(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE), - indices)) { + min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) { + /* If all gone from start onwards, we're done */ if (index == start) break; + /* Otherwise restart to make sure all gone */ index = start; continue; } if (index == start && indices[0] >= end) { + /* All gone out of hole to be punched, we're done */ pagevec_remove_exceptionals(&pvec); pagevec_release(&pvec); break; @@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping, /* We rely upon deletion not changing page->index */ index = indices[i]; - if (index >= end) + if (index >= end) { + /* Restart punch to make sure all gone */ + index = start - 1; break; + } if (radix_tree_exceptional_entry(page)) { clear_exceptional_entry(mapping, index, page); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index ad2ac3c00398..dd11f612e03e 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev) struct vlan_dev_priv *vlan = vlan_dev_priv(dev); int i; - free_percpu(vlan->vlan_pcpu_stats); - vlan->vlan_pcpu_stats = NULL; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; @@ -785,6 +783,15 @@ 
static const struct net_device_ops vlan_netdev_ops = { .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, }; +static void vlan_dev_free(struct net_device *dev) +{ + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + + free_percpu(vlan->vlan_pcpu_stats); + vlan->vlan_pcpu_stats = NULL; + free_netdev(dev); +} + void vlan_setup(struct net_device *dev) { ether_setup(dev); @@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev) dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops; - dev->destructor = free_netdev; + dev->destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; memset(dev->broadcast, 0, ETH_ALEN); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 01a1082e02b3..bfcf6be1d665 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, goto drop; /* Queue packet (standard) */ - skb->sk = sock; - if (sock_queue_rcv_skb(sock, skb) < 0) goto drop; @@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr if (!skb) goto out; - skb->sk = sk; skb_reserve(skb, ddp_dl->header_length); skb_reserve(skb, dev->hard_header_len); skb->dev = dev; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 6f0d9ec37950..a957c8140721 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv, bla_dst = (struct batadv_bla_claim_dst *)hw_dst; bla_dst_own = &bat_priv->bla.claim_dest; - /* check if it is a claim packet in general */ - if (memcmp(bla_dst->magic, bla_dst_own->magic, - sizeof(bla_dst->magic)) != 0) - return 0; - /* if announcement packet, use the source, * otherwise assume it is in the hw_src */ @@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct sk_buff *skb) { - struct batadv_bla_claim_dst *bla_dst; + struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; uint8_t *hw_src, *hw_dst; - struct vlan_ethhdr *vhdr; + struct vlan_hdr *vhdr, vhdr_buf; struct ethhdr *ethhdr; struct arphdr *arphdr; unsigned short vid; + int vlan_depth = 0; __be16 proto; int headlen; int ret; @@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, proto = ethhdr->h_proto; headlen = ETH_HLEN; if (vid & BATADV_VLAN_HAS_TAG) { - vhdr = vlan_eth_hdr(skb); - proto = vhdr->h_vlan_encapsulated_proto; - headlen += VLAN_HLEN; + /* Traverse the VLAN/Ethertypes. + * + * At this point it is known that the first protocol is a VLAN + * header, so start checking at the encapsulated protocol. + * + * The depth of the VLAN headers is recorded to drop BLA claim + * frames encapsulated into multiple VLAN headers (QinQ). 
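The traversal added in this batman-adv hunk walks one VLAN header at a time, re-reading the encapsulated ethertype and counting depth so QinQ-encapsulated claim frames can be rejected. A standalone sketch of the same walk over a raw frame buffer, with invented demo_* names and abbreviated constants:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define DEMO_ETH_HLEN   14
#define DEMO_VLAN_HLEN  4
#define DEMO_P_8021Q    0x8100

/* Returns the final ethertype and writes the VLAN nesting depth. */
static uint16_t demo_walk_vlans(const uint8_t *frame, size_t len, int *depth)
{
        size_t off = DEMO_ETH_HLEN - 2; /* offset of the first ethertype */
        uint16_t proto;

        *depth = 0;
        if (len < DEMO_ETH_HLEN)
                return 0;
        proto = (frame[off] << 8) | frame[off + 1];
        while (proto == DEMO_P_8021Q) {
                off += DEMO_VLAN_HLEN;  /* skip TCI + inner ethertype */
                if (off + 2 > len)
                        return 0;       /* truncated: like skb_header_pointer() failing */
                proto = (frame[off] << 8) | frame[off + 1];
                (*depth)++;
        }
        return proto;
}

int main(void)
{
        /* dst, src, 0x8100, TCI, 0x8100, TCI, 0x0806 (ARP): a QinQ frame */
        uint8_t f[DEMO_ETH_HLEN + 2 * DEMO_VLAN_HLEN] = {0};
        int depth;

        f[12] = 0x81; f[13] = 0x00;
        f[16] = 0x81; f[17] = 0x00;
        f[20] = 0x08; f[21] = 0x06;

        printf("proto %#x depth %d\n",
               (unsigned)demo_walk_vlans(f, sizeof(f), &depth), depth);
        return 0;       /* prints: proto 0x806 depth 2 -> dropped as QinQ */
}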
+ */ + do { + vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN, + &vhdr_buf); + if (!vhdr) + return 0; + + proto = vhdr->h_vlan_encapsulated_proto; + headlen += VLAN_HLEN; + vlan_depth++; + } while (proto == htons(ETH_P_8021Q)); } if (proto != htons(ETH_P_ARP)) @@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv, hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); hw_dst = hw_src + ETH_ALEN + 4; bla_dst = (struct batadv_bla_claim_dst *)hw_dst; + bla_dst_own = &bat_priv->bla.claim_dest; + + /* check if it is a claim frame in general */ + if (memcmp(bla_dst->magic, bla_dst_own->magic, + sizeof(bla_dst->magic)) != 0) + return 0; + + /* check if there is a claim frame encapsulated deeper in (QinQ) and + * drop that, as this is not supported by BLA but should also not be + * sent via the mesh. + */ + if (vlan_depth > 1) + return 1; /* check if it is a claim frame. */ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index e7ee65dc20bf..cbd677f48c00 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -448,10 +448,15 @@ out: * possibly free it * @softif_vlan: the vlan object to release */ -void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) +void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) { - if (atomic_dec_and_test(&softif_vlan->refcount)) - kfree_rcu(softif_vlan, rcu); + if (atomic_dec_and_test(&vlan->refcount)) { + spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); + hlist_del_rcu(&vlan->list); + spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock); + + kfree_rcu(vlan, rcu); + } } /** @@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) if (!vlan) return -ENOMEM; + vlan->bat_priv = bat_priv; vlan->vid = vid; atomic_set(&vlan->refcount, 1); @@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) return err; } + spin_lock_bh(&bat_priv->softif_vlan_list_lock); + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + /* add a new TT local entry. 
This one will be marked with the NOPURGE * flag */ @@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) bat_priv->soft_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); - spin_lock_bh(&bat_priv->softif_vlan_list_lock); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - return 0; } @@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, struct batadv_softif_vlan *vlan) { - spin_lock_bh(&bat_priv->softif_vlan_list_lock); - hlist_del_rcu(&vlan->list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - - batadv_sysfs_del_vlan(bat_priv, vlan); - /* explicitly remove the associated TT local entry because it is marked * with the NOPURGE flag */ batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, vlan->vid, "vlan interface destroyed", false); + batadv_sysfs_del_vlan(bat_priv, vlan); batadv_softif_vlan_free_ref(vlan); } @@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, unsigned short vid) { struct batadv_priv *bat_priv = netdev_priv(dev); + struct batadv_softif_vlan *vlan; + int ret; /* only 802.1Q vlans are supported. * batman-adv does not know how to handle other types @@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, vid |= BATADV_VLAN_HAS_TAG; - return batadv_softif_create_vlan(bat_priv, vid); + /* if a new vlan is getting created and it already exists, it means that + * it was not deleted yet. batadv_softif_vlan_get() increases the + * refcount in order to revive the object. + * + * if it does not exist then create it. + */ + vlan = batadv_softif_vlan_get(bat_priv, vid); + if (!vlan) + return batadv_softif_create_vlan(bat_priv, vid); + + /* recreate the sysfs object if it was already destroyed (and it should + * be, since we received a kill_vid() for this vlan) + */ + if (!vlan->kobj) { + ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); + if (ret) { + batadv_softif_vlan_free_ref(vlan); + return ret; + } + } + + /* add a new TT local entry. This one will be marked with the NOPURGE + * flag.
This must be added again, even if the vlan object already + * exists, because the entry was deleted by kill_vid() + */ + batadv_tt_local_add(bat_priv->soft_iface, + bat_priv->soft_iface->dev_addr, vid, + BATADV_NULL_IFINDEX, BATADV_NO_MARK); + + return 0; } /** diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index d636bde72c9a..5f59e7f899a0 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, struct batadv_priv *bat_priv = netdev_priv(soft_iface); struct batadv_tt_local_entry *tt_local; struct batadv_tt_global_entry *tt_global = NULL; + struct batadv_softif_vlan *vlan; struct net_device *in_dev = NULL; struct hlist_head *head; struct batadv_tt_orig_list_entry *orig_entry; @@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, if (!tt_local) goto out; + /* increase the refcounter of the related vlan */ + vlan = batadv_softif_vlan_get(bat_priv, vid); + batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", addr, BATADV_PRINT_VID(vid), @@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, if (unlikely(hash_added != 0)) { /* remove the reference for the hash */ batadv_tt_local_entry_free_ref(tt_local); + batadv_softif_vlan_free_ref(vlan); goto out; } @@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, { struct batadv_tt_local_entry *tt_local_entry; uint16_t flags, curr_flags = BATADV_NO_FLAGS; + struct batadv_softif_vlan *vlan; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) @@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, hlist_del_rcu(&tt_local_entry->common.hash_entry); batadv_tt_local_entry_free_ref(tt_local_entry); + /* decrease the reference held for this vlan */ + vlan = batadv_softif_vlan_get(bat_priv, vid); + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + out: if (tt_local_entry) batadv_tt_local_entry_free_ref(tt_local_entry); @@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) spinlock_t *list_lock; /* protects write access to the hash lists */ struct batadv_tt_common_entry *tt_common_entry; struct batadv_tt_local_entry *tt_local; + struct batadv_softif_vlan *vlan; struct hlist_node *node_tmp; struct hlist_head *head; uint32_t i; @@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) tt_local = container_of(tt_common_entry, struct batadv_tt_local_entry, common); + + /* decrease the reference held for this vlan */ + vlan = batadv_softif_vlan_get(bat_priv, + tt_common_entry->vid); + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + batadv_tt_local_entry_free_ref(tt_local); } spin_unlock_bh(list_lock); @@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) struct batadv_hashtable *hash = bat_priv->tt.local_hash; struct batadv_tt_common_entry *tt_common; struct batadv_tt_local_entry *tt_local; + struct batadv_softif_vlan *vlan; struct hlist_node *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ @@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) tt_local = container_of(tt_common, struct batadv_tt_local_entry, common); + 
+ /* decrease the reference held for this vlan */ + vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + batadv_tt_local_entry_free_ref(tt_local); } spin_unlock_bh(list_lock); diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 34891a56773f..8854c05622a9 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -687,6 +687,7 @@ struct batadv_priv_nc { /** * struct batadv_softif_vlan - per VLAN attributes set + * @bat_priv: pointer to the mesh object * @vid: VLAN identifier * @kobj: kobject for sysfs vlan subdirectory * @ap_isolation: AP isolation state @@ -696,6 +697,7 @@ struct batadv_priv_nc { * @rcu: struct used for freeing in a RCU-safe manner */ struct batadv_softif_vlan { + struct batadv_priv *bat_priv; unsigned short vid; struct kobject *kobj; atomic_t ap_isolation; /* boolean */ diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ca01d1861854..a7a27bc2c0b1 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, disc_work.work); + int refcnt = atomic_read(&conn->refcnt); BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); - if (atomic_read(&conn->refcnt)) + WARN_ON(refcnt < 0); + + /* FIXME: It was observed that in a failed pairing scenario, refcnt + * drops below 0. Probably this is because l2cap_conn_del calls + * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is + * dropped. After that loop hci_chan_del is called which also drops + * conn. For now make sure that ACL is alive if refcnt is higher than 0, + * otherwise drop it. + */ + if (refcnt > 0) return; switch (conn->state) { diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index f2829a7932e2..e33a982161c1 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = { { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, }; +static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) +{ + /* If either side has unknown io_caps, use JUST WORKS */ + if (local_io > SMP_IO_KEYBOARD_DISPLAY || + remote_io > SMP_IO_KEYBOARD_DISPLAY) + return JUST_WORKS; + + return gen_method[remote_io][local_io]; +} + static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, u8 local_io, u8 remote_io) { @@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); /* If neither side wants MITM, use JUST WORKS */ - /* If either side has unknown io_caps, use JUST WORKS */ /* Otherwise, look up method from the table */ - if (!(auth & SMP_AUTH_MITM) || - local_io > SMP_IO_KEYBOARD_DISPLAY || - remote_io > SMP_IO_KEYBOARD_DISPLAY) + if (!(auth & SMP_AUTH_MITM)) method = JUST_WORKS; else - method = gen_method[remote_io][local_io]; + method = get_auth_method(smp, local_io, remote_io); /* If not bonding, don't ask user to confirm a Zero TK */ if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) @@ -669,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing rsp, *req = (void *) skb->data; struct smp_chan *smp; - u8 key_size, auth; + u8 key_size, auth, sec_level; int ret; BT_DBG("conn %p", conn); @@ -695,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) /* We didn't
start the pairing, so match remote */ auth = req->auth_req; - conn->hcon->pending_sec_level = authreq_to_seclevel(auth); + sec_level = authreq_to_seclevel(auth); + if (sec_level > conn->hcon->pending_sec_level) + conn->hcon->pending_sec_level = sec_level; + + /* If we need MITM, check that it can be achieved */ + if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { + u8 method; + + method = get_auth_method(smp, conn->hcon->io_capability, + req->io_capability); + if (method == JUST_WORKS || method == JUST_CFM) + return SMP_AUTH_REQUIREMENTS; + } build_pairing_cmd(conn, req, &rsp, auth); @@ -743,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) if (check_enc_key_size(conn, key_size)) return SMP_ENC_KEY_SIZE; + /* If we need MITM, check that it can be achieved */ + if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { + u8 method; + + method = get_auth_method(smp, req->io_capability, + rsp->io_capability); + if (method == JUST_WORKS || method == JUST_CFM) + return SMP_AUTH_REQUIREMENTS; + } + get_random_bytes(smp->prnd, sizeof(smp->prnd)); smp->prsp[0] = SMP_CMD_PAIRING_RSP; @@ -838,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) struct smp_cmd_pairing cp; struct hci_conn *hcon = conn->hcon; struct smp_chan *smp; + u8 sec_level; BT_DBG("conn %p", conn); @@ -847,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) if (!(conn->hcon->link_mode & HCI_LM_MASTER)) return SMP_CMD_NOTSUPP; - hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); + sec_level = authreq_to_seclevel(rp->auth_req); + if (sec_level > hcon->pending_sec_level) + hcon->pending_sec_level = sec_level; if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) return 0; @@ -901,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) if (smp_sufficient_security(hcon, sec_level)) return 1; + if (sec_level > hcon->pending_sec_level) + hcon->pending_sec_level = sec_level; + if (hcon->link_mode & HCI_LM_MASTER) - if (smp_ltk_encrypt(conn, sec_level)) - goto done; + if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) + return 0; if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) return 0; @@ -918,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) * requires it.
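Two patterns recur in the smp.c changes above: the method table is only indexed after both IO capabilities are bounds-checked (anything unknown degrades to Just Works), and pending_sec_level is only ever ratcheted upward, never silently lowered. A compact model of both, with invented demo_* names; the real 5x5 matrix contents are elided, so the zero-initialized table here maps everything to JUST_WORKS:

#include <stdio.h>

enum { JUST_WORKS, JUST_CFM, REQ_PASSKEY, CFM_PASSKEY, REQ_OOB, OVERLAP };
#define DEMO_IO_MAX 4   /* stands in for SMP_IO_KEYBOARD_DISPLAY */

static const unsigned char demo_method[5][5];   /* contents elided in this sketch */

static unsigned char demo_get_method(int local_io, int remote_io)
{
        /* Out-of-range capability: fall back to Just Works, as above. */
        if (local_io > DEMO_IO_MAX || remote_io > DEMO_IO_MAX)
                return JUST_WORKS;
        return demo_method[remote_io][local_io];
}

/* Security level may only ever go up. */
static void demo_raise_level(int *pending, int requested)
{
        if (requested > *pending)
                *pending = requested;
}

int main(void)
{
        int pending = 1;

        demo_raise_level(&pending, 3);
        demo_raise_level(&pending, 2);  /* ignored: lower than current */
        printf("method %d, level %d\n", demo_get_method(9, 0), pending);
        return 0;       /* prints: method 0, level 3 */
}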
*/ if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || - sec_level > BT_SECURITY_MEDIUM) + hcon->pending_sec_level > BT_SECURITY_MEDIUM) authreq |= SMP_AUTH_MITM; if (hcon->link_mode & HCI_LM_MASTER) { @@ -937,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) set_bit(SMP_FLAG_INITIATOR, &smp->flags); -done: - hcon->pending_sec_level = sec_level; - return 0; } diff --git a/net/core/dev.c b/net/core/dev.c index 30eedf677913..367a586d0c8a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */ static struct list_head offload_base __read_mostly; static int netif_rx_internal(struct sk_buff *skb); +static int call_netdevice_notifiers_info(unsigned long val, + struct net_device *dev, + struct netdev_notifier_info *info); /* * The @dev_base_head list is protected by @dev_base_lock and the rtnl @@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change); void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { - call_netdevice_notifiers(NETDEV_CHANGE, dev); + struct netdev_notifier_change_info change_info; + + change_info.flags_changed = 0; + call_netdevice_notifiers_info(NETDEV_CHANGE, dev, + &change_info.info); rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); } } @@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->vlan_tci = 0; skb->dev = napi->dev; skb->skb_iif = 0; + skb->encapsulation = 0; + skb_shinfo(skb)->gso_type = 0; skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); napi->skb = skb; @@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota) #endif napi->weight = weight_p; local_irq_disable(); - while (work < quota) { + while (1) { struct sk_buff *skb; - unsigned int qlen; while ((skb = __skb_dequeue(&sd->process_queue))) { local_irq_enable(); @@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota) } rps_lock(sd); - qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen) - skb_queue_splice_tail_init(&sd->input_pkt_queue, - &sd->process_queue); - - if (qlen < quota - work) { + if (skb_queue_empty(&sd->input_pkt_queue)) { /* * Inline a custom version of __napi_complete(). * only current cpu owns and manipulates this napi, - * and NAPI_STATE_SCHED is the only possible flag set on backlog. - * we can use a plain write instead of clear_bit(), + * and NAPI_STATE_SCHED is the only possible flag set + * on backlog. + * We can use a plain write instead of clear_bit(), + * and we don't need an smp_mb() memory barrier.
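The process_backlog() rewrite above turns the loop into drain-then-splice: empty the private process queue without the lock, then, under the lock, either splice the shared input queue over and go around again or declare the backlog complete. A single-threaded model of that control flow, with invented demo_* names:

#include <pthread.h>
#include <stdio.h>

#define DEMO_QMAX 16

struct demo_q { int buf[DEMO_QMAX]; int len; };

static pthread_mutex_t demo_rps_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_q input_q;           /* producers append under the lock */
static struct demo_q process_q;         /* drained lock-free by one consumer */

static void demo_backlog_poll(void)
{
        for (;;) {
                /* Drain the private queue without holding the lock. */
                for (int i = 0; i < process_q.len; i++)
                        printf("pkt %d\n", process_q.buf[i]);
                process_q.len = 0;

                pthread_mutex_lock(&demo_rps_lock);
                if (input_q.len == 0) {
                        /* Nothing new arrived: done (napi->state = 0). */
                        pthread_mutex_unlock(&demo_rps_lock);
                        break;
                }
                /* Splice everything over and loop to drain again. */
                process_q = input_q;
                input_q.len = 0;
                pthread_mutex_unlock(&demo_rps_lock);
        }
}

int main(void)
{
        input_q.buf[0] = 1; input_q.buf[1] = 2; input_q.len = 2;
        demo_backlog_poll();
        return 0;
}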
*/ list_del(&napi->poll_list); napi->state = 0; + rps_unlock(sd); - quota = work + qlen; + break; } + + skb_queue_splice_tail_init(&sd->input_pkt_queue, + &sd->process_queue); rps_unlock(sd); } local_irq_enable(); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 32d872eec7f5..559890b0f0a2 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); } else { + struct neigh_table *tbl = p->tbl; dev_name_source = "default"; - t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); - t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; - t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; - t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; + t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval; + t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1; + t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2; + t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3; } if (handler) { diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index 9acec61f5433..dd8696a3dbec 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c @@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen, goto put; memcpy(*_result, upayload->data, len); - *_result[len] = '\0'; + (*_result)[len] = '\0'; if (_expiry) *_expiry = rkey->expiry; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d5e6836cf772..d156b3c5f363 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff) int proto = iph->protocol; int err = -ENOSYS; + if (skb->encapsulation) + skb_set_inner_network_header(skb, nhoff); + csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 4e9619bca732..0485bf7f8f03 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, skb_push(skb, hdr_len); + skb_reset_transport_header(skb); greh = (struct gre_base_hdr *)skb->data; greh->flags = tnl_flags_to_gre_flags(tpi->flags); greh->protocol = tpi->proto; diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index eb92deb12666..f0bdd47bbbcb 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff) int err = -ENOENT; __be16 type; + skb->encapsulation = 1; + skb_shinfo(skb)->gso_type = SKB_GSO_GRE; + type = greh->protocol; if (greh->flags & GRE_KEY) grehlen += GRE_HEADER_SECTION; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 79c3d947a481..42b7bcf8045b 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb) /* fall through */ case 0: info = ntohs(icmph->un.frag.mtu); - if (!info) - goto out; } break; case ICMP_SR_FAILED: diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6748d420f714..db710b059bab 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) rtnl_lock(); in_dev = ip_mc_find_dev(net, imr); + if (!in_dev) { + ret = -ENODEV; + goto out; + } ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = rtnl_dereference(*imlp)) != NULL; @@ 
-1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) *imlp = iml->next_rcu; - if (in_dev) - ip_mc_dec_group(in_dev, group); + ip_mc_dec_group(in_dev, group); rtnl_unlock(); /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); kfree_rcu(iml, rcu); return 0; } - if (!in_dev) - ret = -ENODEV; +out: rtnl_unlock(); return ret; } diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 5e7aecea05cd..ad382499bace 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -288,6 +288,10 @@ int ip_options_compile(struct net *net, optptr++; continue; } + if (unlikely(l < 2)) { + pp_ptr = optptr; + goto error; + } optlen = optptr[1]; if (optlen < 2 || optlen > l) { pp_ptr = optptr; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 54b6731dab55..6f9de61dce5f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -169,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, hlist_for_each_entry_rcu(t, head, hash_node) { if (remote != t->parms.iph.daddr || + t->parms.iph.saddr != 0 || !(t->dev->flags & IFF_UP)) continue; @@ -185,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, head = &itn->tunnels[hash]; hlist_for_each_entry_rcu(t, head, hash_node) { - if ((local != t->parms.iph.saddr && - (local != t->parms.iph.daddr || - !ipv4_is_multicast(local))) || - !(t->dev->flags & IFF_UP)) + if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) && + (local != t->parms.iph.daddr || !ipv4_is_multicast(local))) + continue; + + if (!(t->dev->flags & IFF_UP)) continue; if (!ip_tunnel_key_match(&t->parms, flags, key)) @@ -205,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, hlist_for_each_entry_rcu(t, head, hash_node) { if (t->parms.i_key != key || + t->parms.iph.saddr != 0 || + t->parms.iph.daddr != 0 || !(t->dev->flags & IFF_UP)) continue; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 082239ffe34a..3162ea923ded 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) const struct iphdr *iph = (const struct iphdr *) skb->data; struct flowi4 fl4; struct rtable *rt; - struct dst_entry *dst; + struct dst_entry *odst = NULL; bool new = false; bh_lock_sock(sk); @@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) if (!ip_sk_accept_pmtu(sk)) goto out; - rt = (struct rtable *) __sk_dst_get(sk); + odst = sk_dst_get(sk); - if (sock_owned_by_user(sk) || !rt) { + if (sock_owned_by_user(sk) || !odst) { __ipv4_sk_update_pmtu(skb, sk, mtu); goto out; } __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); - if (!__sk_dst_check(sk, 0)) { + rt = (struct rtable *)odst; + if (odst->obsolete && odst->ops->check(odst, 0) == NULL) { rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) goto out; @@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); - dst = dst_check(&rt->dst, 0); - if (!dst) { + if (!dst_check(&rt->dst, 0)) { if (new) dst_release(&rt->dst); @@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) } if (new) - __sk_dst_set(sk, &rt->dst); + sk_dst_set(sk, &rt->dst); out: bh_unlock_sock(sk); + dst_release(odst); } EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index eb1dde37e678..9d2118e5fbc7 100644 --- 
a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (unlikely(tp->repair)) { if (tp->repair_queue == TCP_RECV_QUEUE) { copied = tcp_send_rcvq(sk, msg, size); - goto out; + goto out_nopush; } err = -EINVAL; @@ -1282,6 +1282,7 @@ wait_for_memory: out: if (copied) tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); +out_nopush: release_sock(sk); return copied + copied_syn; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b5c23756965a..40639c288dc2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, } /* D-SACK for already forgotten data... Do dumb counting. */ - if (dup_sack && tp->undo_marker && tp->undo_retrans && + if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; @@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk, /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { - if (tp->undo_marker && tp->undo_retrans && + if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) @@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp) tp->lost_out = 0; tp->undo_marker = 0; - tp->undo_retrans = 0; + tp->undo_retrans = -1; } void tcp_clear_retrans(struct tcp_sock *tp) @@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) tp->prior_ssthresh = 0; tp->undo_marker = tp->snd_una; - tp->undo_retrans = tp->retrans_out; + tp->undo_retrans = tp->retrans_out ? : -1; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (!ece_ack) diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 4e86c59ec7f7..55046ecd083e 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff) th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, iph->daddr, 0); - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; return tcp_gro_complete(skb); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d92bce0ea24e..179b51e6bda3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) if (!tp->retrans_stamp) tp->retrans_stamp = TCP_SKB_CB(skb)->when; - tp->undo_retrans += tcp_skb_pcount(skb); - /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). 
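The undo_retrans changes above use -1 as a "never armed" sentinel so the > 0 tests in the D-SACK accounting cannot be satisfied by a counter that was reset but never incremented; the counter is lazily zeroed on the first retransmission. The same idea in miniature, with invented demo_* names:

#include <stdio.h>

static int demo_undo_retrans = -1;      /* -1: no retransmission recorded yet */

static void demo_on_retransmit(int pcount)
{
        if (demo_undo_retrans < 0)      /* arm lazily on first retransmit */
                demo_undo_retrans = 0;
        demo_undo_retrans += pcount;
}

static void demo_on_dsack(void)
{
        /* Only meaningful while armed and positive, as the > 0 tests above. */
        if (demo_undo_retrans > 0)
                demo_undo_retrans--;
}

int main(void)
{
        demo_on_dsack();                /* ignored: counter never armed */
        demo_on_retransmit(2);
        demo_on_dsack();
        printf("undo_retrans = %d\n", demo_undo_retrans);       /* 1 */
        return 0;
}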
*/ @@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) } else if (err != -EBUSY) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } + + if (tp->undo_retrans < 0) + tp->undo_retrans = 0; + tp->undo_retrans += tcp_skb_pcount(skb); return err; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d92f94b7e402..7d5a8661df76 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto csum_error; - if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { + UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, + is_udplite); goto drop; + } rc = 0; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 08b367c6b9cf..617f0958e164 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb) len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); len -= skb_network_header_len(skb); - /* Drop queries with not link local source */ - if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) + /* RFC3810 6.2 + * Upon reception of an MLD message that contains a Query, the node + * checks if the source address of the message is a valid link-local + * address, if the Hop Limit is set to 1, and if the Router Alert + * option is present in the Hop-By-Hop Options header of the IPv6 + * packet. If any of these checks fails, the packet is dropped. + */ + if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) || + ipv6_hdr(skb)->hop_limit != 1 || + !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) || + IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD)) return -EINVAL; idev = __in6_dev_get(skb->dev); diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 8517d3cd1aed..01b0ff9a0c2c 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff) th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, &iph->daddr, 0); - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; return tcp_gro_complete(skb); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 95c834799288..7092ff78fd84 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) goto csum_error; } - if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) + if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) { + UDP6_INC_STATS_BH(sock_net(sk), + UDP_MIB_RCVBUFERRORS, is_udplite); goto drop; + } skb_dst_drop(skb); @@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) bh_unlock_sock(sk); return rc; + csum_error: UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 950909f04ee6..13752d96275e 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, int err; if (level != SOL_PPPOL2TP) - return udp_prot.setsockopt(sk, level, optname, optval, optlen); + return -EINVAL; if (optlen < sizeof(int)) return -EINVAL; @@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, struct pppol2tp_session *ps; if (level != SOL_PPPOL2TP) - return udp_prot.getsockopt(sk, level, optname, optval, optlen); + return -EINVAL; if (get_user(len, optlen)) return -EFAULT; diff --git a/net/mac80211/util.c 
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 	int err;
 
 	/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+			    24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
 	if (!skb)
 		return;
 
-	skb_reserve(skb, local->hw.extra_tx_headroom);
+	skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
 
 	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
 	memset(mgmt, 0, 24 + 6);
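A standalone sketch of the sizing pattern in the mac80211 hunk above: when a frame may later be encrypted in place, the buffer is allocated with extra slack for the cipher's per-packet header (IV) in front and trailer (ICV) behind, and the initial write offset is advanced past the reserved front. The constants are invented and this only illustrates headroom/tailroom budgeting, not the real skb API:

    #include <stdio.h>
    #include <stdlib.h>

    #define HEADROOM 16   /* driver headroom, like extra_tx_headroom */
    #define IV_LEN    4   /* per-packet cipher header */
    #define ICV_LEN   4   /* per-packet cipher trailer */

    int main(void)
    {
        size_t payload = 24 + 6;
        size_t total = HEADROOM + IV_LEN + payload + ICV_LEN;
        unsigned char *buf = malloc(total);
        if (!buf)
            return 1;
        /* "reserve": start writing after the headroom *and* the IV slot,
         * so encryption can prepend the IV without reallocating. */
        unsigned char *data = buf + HEADROOM + IV_LEN;
        printf("payload at offset %zu, %zu spare tail bytes\n",
               (size_t)(data - buf), total - (size_t)(data - buf) - payload);
        free(buf);
        return 0;
    }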
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
 	INIT_LIST_HEAD(&afi->tables);
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail(&afi->list, &net->nft.af_info);
+	list_add_tail_rcu(&afi->list, &net->nft.af_info);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&afi->list);
+	list_del_rcu(&afi->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
 			if (idx < s_idx)
 				goto cont;
 			if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 					      NLM_F_MULTI,
 					      afi->family, table) < 0)
 				goto done;
+
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 			idx++;
 		}
 	}
 done:
+	rcu_read_unlock();
 	cb->args[0] = idx;
 	return skb->len;
 }
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 	if (flags & ~NFT_TABLE_F_DORMANT)
 		return -EINVAL;
 
+	if (flags == ctx->table->flags)
+		return 0;
+
 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
 				sizeof(struct nft_trans_table));
 	if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 		module_put(afi->owner);
 		return err;
 	}
-	list_add_tail(&table->list, &afi->tables);
+	list_add_tail_rcu(&table->list, &afi->tables);
 	return 0;
 }
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	list_del(&table->list);
+	list_del_rcu(&table->list);
 	return 0;
 }
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
 	struct nft_stats *cpu_stats, total;
 	struct nlattr *nest;
+	unsigned int seq;
+	u64 pkts, bytes;
 	int cpu;
 
 	memset(&total, 0, sizeof(total));
 	for_each_possible_cpu(cpu) {
 		cpu_stats = per_cpu_ptr(stats, cpu);
-		total.pkts += cpu_stats->pkts;
-		total.bytes += cpu_stats->bytes;
+		do {
+			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			pkts = cpu_stats->pkts;
+			bytes = cpu_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+		total.pkts += pkts;
+		total.bytes += bytes;
 	}
 	nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
 	if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
-			list_for_each_entry(chain, &table->chains, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(chain, &table->chains, list) {
 				if (idx < s_idx)
 					goto cont;
 				if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 						      NLM_F_MULTI,
 						      afi->family, table, chain) < 0)
 					goto done;
+
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 				idx++;
 			}
 		}
 	}
 done:
+	rcu_read_unlock();
 	cb->args[0] = idx;
 	return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
 		return ERR_PTR(-EINVAL);
 
-	newstats = alloc_percpu(struct nft_stats);
+	newstats = netdev_alloc_pcpu_stats(struct nft_stats);
 	if (newstats == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			}
 			basechain->stats = stats;
 		} else {
-			stats = alloc_percpu(struct nft_stats);
+			stats = netdev_alloc_pcpu_stats(struct nft_stats);
 			if (IS_ERR(stats)) {
 				module_put(type->owner);
 				kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 		goto err2;
 
 	table->use++;
-	list_add_tail(&chain->list, &table->chains);
+	list_add_tail_rcu(&chain->list, &table->chains);
 	return 0;
 err2:
 	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
 		return err;
 
 	table->use--;
-	list_del(&chain->list);
+	list_del_rcu(&chain->list);
 	return 0;
 }
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
 	if (type->family == NFPROTO_UNSPEC)
-		list_add_tail(&type->list, &nf_tables_expressions);
+		list_add_tail_rcu(&type->list, &nf_tables_expressions);
 	else
-		list_add(&type->list, &nf_tables_expressions);
+		list_add_rcu(&type->list, &nf_tables_expressions);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&type->list);
+	list_del_rcu(&type->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 	unsigned int idx = 0, s_idx = cb->args[0];
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
-	u8 genctr = ACCESS_ONCE(net->nft.genctr);
-	u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
-			list_for_each_entry(chain, &table->chains, list) {
-				list_for_each_entry(rule, &chain->rules, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(chain, &table->chains, list) {
+				list_for_each_entry_rcu(rule, &chain->rules, list) {
 					if (!nft_rule_is_active(net, rule))
 						goto cont;
 					if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 							      NLM_F_MULTI | NLM_F_APPEND,
 							      afi->family, table, chain, rule) < 0)
 						goto done;
+
+					nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 					idx++;
 				}
@@ -1579,9 +1603,7 @@ cont:
 		}
 	}
 done:
-	/* Invalidate this dump, a transition to the new generation happened */
-	if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-		return -EBUSY;
+	rcu_read_unlock();
 
 	cb->args[0] = idx;
 	return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail(&ops->list, &nf_tables_set_ops);
+	list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&ops->list);
+	list_del_rcu(&ops->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(set, &ctx->table->sets, list) {
+	rcu_read_lock();
+	cb->seq = ctx->net->nft.base_seq;
+
+	list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 		if (idx < s_idx)
 			goto cont;
 		if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
 			cb->args[0] = idx;
 			goto done;
 		}
+		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 		idx++;
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(table, &ctx->afi->tables, list) {
+	rcu_read_lock();
+	cb->seq = ctx->net->nft.base_seq;
+
+	list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
 		if (cur_table) {
 			if (cur_table != table)
 				continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
 		}
 		ctx->table = table;
 		idx = 0;
-		list_for_each_entry(set, &ctx->table->sets, list) {
+		list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 			if (idx < s_idx)
 				goto cont;
 			if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
 				cb->args[2] = (unsigned long) table;
 				goto done;
 			}
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 			idx++;
 		}
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (cur_family) {
 			if (afi->family != cur_family)
 				continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 			cur_family = 0;
 		}
 
-		list_for_each_entry(table, &afi->tables, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
 			if (cur_table) {
 				if (cur_table != table)
 					continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 			ctx->table = table;
 			ctx->afi = afi;
 			idx = 0;
-			list_for_each_entry(set, &ctx->table->sets, list) {
+			list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 				if (idx < s_idx)
 					goto cont;
 				if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 					cb->args[3] = afi->family;
 					goto done;
 				}
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 				idx++;
 			}
@@ -2339,6 +2375,7 @@ cont:
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		goto err2;
 
-	list_add_tail(&set->list, &table->sets);
+	list_add_tail_rcu(&set->list, &table->sets);
 	table->use++;
 	return 0;
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-	list_del(&set->list);
+	list_del_rcu(&set->list);
 	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
 	nft_set_destroy(set);
 }
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	list_del(&set->list);
+	list_del_rcu(&set->list);
 	ctx.table->use--;
 	return 0;
 }
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	}
bind:
 	binding->chain = ctx->chain;
-	list_add_tail(&binding->list, &set->bindings);
+	list_add_tail_rcu(&binding->list, &set->bindings);
 	return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 			  struct nft_set_binding *binding)
 {
-	list_del(&binding->list);
+	list_del_rcu(&binding->list);
 
 	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
 	    !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
 	struct nft_set *set;
 
 	/* Bump generation counter, invalidate any dump in progress */
-	net->nft.genctr++;
+	while (++net->nft.base_seq == 0);
 
 	/* A new generation has just started */
 	net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
 			}
 			nft_trans_destroy(trans);
 		} else {
-			list_del(&trans->ctx.table->list);
+			list_del_rcu(&trans->ctx.table->list);
 		}
 		break;
 	case NFT_MSG_DELTABLE:
-		list_add_tail(&trans->ctx.table->list,
-			      &trans->ctx.afi->tables);
+		list_add_tail_rcu(&trans->ctx.table->list,
+				  &trans->ctx.afi->tables);
 		nft_trans_destroy(trans);
 		break;
 	case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
 			nft_trans_destroy(trans);
 		} else {
 			trans->ctx.table->use--;
-			list_del(&trans->ctx.chain->list);
+			list_del_rcu(&trans->ctx.chain->list);
 			if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
 			    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
 				nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
 		break;
 	case NFT_MSG_DELCHAIN:
 		trans->ctx.table->use++;
-		list_add_tail(&trans->ctx.chain->list,
-			      &trans->ctx.table->chains);
+		list_add_tail_rcu(&trans->ctx.chain->list,
+				  &trans->ctx.table->chains);
 		nft_trans_destroy(trans);
 		break;
 	case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
 		break;
 	case NFT_MSG_NEWSET:
 		trans->ctx.table->use--;
-		list_del(&nft_trans_set(trans)->list);
+		list_del_rcu(&nft_trans_set(trans)->list);
 		break;
 	case NFT_MSG_DELSET:
 		trans->ctx.table->use++;
-		list_add_tail(&nft_trans_set(trans)->list,
-			      &trans->ctx.table->sets);
+		list_add_tail_rcu(&nft_trans_set(trans)->list,
+				  &trans->ctx.table->sets);
 		nft_trans_destroy(trans);
 		break;
 	case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
 {
 	INIT_LIST_HEAD(&net->nft.af_info);
 	INIT_LIST_HEAD(&net->nft.commit_list);
+	net->nft.base_seq = 1;
 	return 0;
 }
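A standalone C sketch of the generation scheme the nf_tables_api.c hunks above introduce: the commit path bumps base_seq on every transaction and skips the value 0 when the counter wraps, while a dumper records the generation it started under so a concurrent commit can be detected (in the kernel, nl_dump_check_consistent() surfaces this to userspace as NLM_F_DUMP_INTR). All names below are invented for the illustration:

    #include <stdio.h>

    static unsigned int base_seq = 1;

    static void commit(void)
    {
        while (++base_seq == 0)   /* wrap past 0, as in nf_tables_commit() */
            ;
    }

    int main(void)
    {
        unsigned int start = base_seq;   /* like cb->seq at dump start */

        commit();                        /* concurrent modification */
        if (base_seq != start)
            printf("dump inconsistent, caller should restart\n");
        return 0;
    }

Reserving 0 as "never a valid generation" means an uninitialized or cleared sequence can never accidentally match a dump in progress.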
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 	struct nft_data data[NFT_REG_MAX + 1];
 	unsigned int stackptr = 0;
 	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-	struct nft_stats __percpu *stats;
+	struct nft_stats *stats;
 	int rulenum;
 	/*
 	 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
 		nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
 	rcu_read_lock_bh();
-	stats = rcu_dereference(nft_base_chain(basechain)->stats);
-	__this_cpu_inc(stats->pkts);
-	__this_cpu_add(stats->bytes, pkt->skb->len);
+	stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+	u64_stats_update_begin(&stats->syncp);
+	stats->pkts++;
+	stats->bytes += pkt->skb->len;
+	u64_stats_update_end(&stats->syncp);
 	rcu_read_unlock_bh();
 
 	return nft_base_chain(basechain)->policy;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
 		while (nlk->cb_running && netlink_dump_space(nlk)) {
 			err = netlink_dump(sk);
 			if (err < 0) {
-				sk->sk_err = err;
+				sk->sk_err = -err;
 				sk->sk_error_report(sk);
 				break;
 			}
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
 		ret = netlink_dump(sk);
 		if (ret) {
-			sk->sk_err = ret;
+			sk->sk_err = -ret;
 			sk->sk_error_report(sk);
 		}
 	}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, a);
+			if (unlikely(err)) /* skb already freed. */
+				return err;
 			break;
 		}
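The nf_tables_core.c hunk above brackets each per-cpu counter update in u64_stats_update_begin()/end(), pairing with the fetch/retry loop added to nft_dump_stats() earlier, so pkts/bytes are read as a consistent pair even where 64-bit loads are not atomic. Below is a single-threaded userspace sketch of the retry logic only; a real implementation also needs memory barriers or atomics, omitted here for clarity, and all names are invented:

    #include <stdio.h>

    struct stats {
        unsigned int seq;               /* even: idle, odd: writer active */
        unsigned long long pkts, bytes;
    };

    static void update(struct stats *s, unsigned long long len)
    {
        s->seq++;                       /* begin: seq becomes odd */
        s->pkts++;
        s->bytes += len;
        s->seq++;                       /* end: seq becomes even again */
    }

    static void read_consistent(const struct stats *s,
                                unsigned long long *pkts,
                                unsigned long long *bytes)
    {
        unsigned int start;

        do {
            while ((start = s->seq) & 1)   /* writer mid-update: wait */
                ;
            *pkts = s->pkts;
            *bytes = s->bytes;
        } while (s->seq != start);         /* changed underneath: retry */
    }

    int main(void)
    {
        struct stats s = { 0, 0, 0 };
        unsigned long long p, b;

        update(&s, 1500);
        read_consistent(&s, &p, &b);
        printf("%llu pkts, %llu bytes\n", p, b);
        return 0;
    }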
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 	OVS_CB(skb)->flow = flow;
 	OVS_CB(skb)->pkt_key = &key;
 
-	ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+	ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
 	ovs_execute_actions(dp, skb);
 	stats_counter = &stats->n_hit;
 
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		}
 		/* The unmasked key has to be the same for flow updates. */
 		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-			error = -EEXIST;
-			goto err_unlock_ovs;
+			flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+			if (!flow) {
+				error = -ENOENT;
+				goto err_unlock_ovs;
+			}
 		}
 		/* Update actions. */
 		old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
 		goto err_unlock_ovs;
 	}
 	/* Check that the flow exists. */
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
 	if (unlikely(!flow)) {
 		error = -ENOENT;
 		goto err_unlock_ovs;
 	}
-	/* The unmasked key has to be the same for flow updates. */
-	if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-		error = -EEXIST;
-		goto err_unlock_ovs;
-	}
+
 	/* Update actions, if present. */
 	if (likely(acts)) {
 		old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+	if (!flow) {
 		err = -ENOENT;
 		goto unlock;
 	}
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
 		goto unlock;
 	}
 
-	flow = ovs_flow_tbl_lookup(&dp->table, &key);
-	if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+	if (unlikely(!flow)) {
 		err = -ENOENT;
 		goto unlock;
 	}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+			   struct sk_buff *skb)
 {
 	struct flow_stats *stats;
-	__be16 tcp_flags = flow->key.tp.flags;
 	int node = numa_node_id();
 
 	stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
 	unsigned char       ar_tip[4];		/* target IP address */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+			   struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
 			unsigned long *used, __be16 *tcp_flags);
 void ovs_flow_stats_clear(struct sw_flow *);
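A toy illustration (not OVS code) of why the datapath.c hunks above switch flow set/get/delete over to ovs_flow_tbl_lookup_exact(), defined in flow_table.c below: a masked lookup ignores every bit the mask clears, so it can return a flow whose unmasked key differs, and the full key must be re-checked. A single flow and a one-word "key" stand in for the real tables:

    #include <stdio.h>

    struct flow { unsigned int key, mask; };

    static int masked_match(const struct flow *f, unsigned int key)
    {
        return (key & f->mask) == (f->key & f->mask);
    }

    static int exact_match(const struct flow *f, unsigned int key)
    {
        return masked_match(f, key) && f->key == key;   /* verify unmasked */
    }

    int main(void)
    {
        struct flow f = { .key = 0x1200, .mask = 0xff00 };

        printf("masked: %d, exact: %d\n",
               masked_match(&f, 0x1234),   /* 1: low byte ignored */
               exact_match(&f, 0x1234));   /* 0: unmasked keys differ */
        return 0;
    }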
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+					  struct sw_flow_match *match)
+{
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct sw_flow_mask *mask;
+	struct sw_flow *flow;
+
+	/* Always called under ovs-mutex. */
+	list_for_each_entry(mask, &tbl->mask_list, list) {
+		flow = masked_flow_lookup(ti, match->key, mask);
+		if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
+			return flow;
+	}
+	return NULL;
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
 	struct sw_flow_mask *mask;
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 					  u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+					  struct sw_flow_match *match);
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 			       struct sw_flow_match *match);
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
 	return PACKET_RCVD;
 }
 
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+		   const struct tnl_ptk_info *tpi)
+{
+	struct ovs_net *ovs_net;
+	struct vport *vport;
+
+	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+	if (unlikely(!vport))
+		return PACKET_REJECT;
+	else
+		return PACKET_RCVD;
+}
+
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
 	struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
 
 static struct gre_cisco_protocol gre_protocol = {
 	.handler	= gre_rcv,
+	.err_handler	= gre_err,
 	.priority	= 1,
 };
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 	return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
 	struct tc_u_knode *n;
-	unsigned int i = 0x7FF;
+	unsigned long i;
+	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!bitmap)
+		return handle | 0xFFF;
 
 	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-		if (i < TC_U32_NODE(n->handle))
-			i = TC_U32_NODE(n->handle);
-	i++;
+		set_bit(TC_U32_NODE(n->handle), bitmap);
 
-	return handle | (i > 0xFFF ? 0xFFF : i);
+	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+	if (i >= NR_U32_NODE)
+		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+	kfree(bitmap);
+	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
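A userspace sketch of the allocation strategy gen_new_kid() adopts above: record the IDs already in use in a bitmap, then take the first free ID at or above a preferred start, falling back to the low range when the upper range is exhausted (the old code only tracked the maximum and could wrap into collisions). The byte-per-bit "bitmap" below is a simplified stand-in for the kernel's bitmap helpers:

    #include <stdio.h>
    #include <string.h>

    #define NR_IDS 4096

    static unsigned char used[NR_IDS];   /* byte per bit keeps it simple */

    static int find_free(int from)
    {
        for (int i = from; i < NR_IDS; i++)
            if (!used[i])
                return i;
        return NR_IDS;                   /* nothing free */
    }

    static int alloc_id(void)
    {
        int i = find_free(0x800);        /* preferred upper range first */
        if (i >= NR_IDS)
            i = find_free(1);            /* wrap around; 0 stays reserved */
        if (i < NR_IDS)
            used[i] = 1;
        return i < NR_IDS ? i : -1;
    }

    int main(void)
    {
        memset(used + 0x800, 1, NR_IDS - 0x800);   /* upper range full */
        printf("allocated 0x%x\n", alloc_id());    /* falls back to 0x1 */
        return 0;
    }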
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
  * specification [SCTP] and any extensions for a list of possible
  * error formats.
  */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-	const struct sctp_association *asoc, struct sctp_chunk *chunk,
-	__u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+				struct sctp_chunk *chunk, __u16 flags,
+				gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
 	/* Copy the skb to a new skb with room for us to prepend
 	 * notification with.
 	 */
-	skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-			      0, gfp);
+	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
 	/* Pull off the rest of the cause TLV from the chunk. */
 	skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
 	event = sctp_skb2event(skb);
 	sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-	sre = (struct sctp_remote_error *)
-		skb_push(skb, sizeof(struct sctp_remote_error));
+	sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
 	/* Trim the buffer to the right length. */
-	skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+	skb_trim(skb, sizeof(*sre) + elen);
 
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_type:
-	 *   It should be SCTP_REMOTE_ERROR.
-	 */
+	/* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+	memset(sre, 0, sizeof(*sre));
 	sre->sre_type = SCTP_REMOTE_ERROR;
-
-	/*
-	 * Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_flags: 16 bits (unsigned integer)
-	 *   Currently unused.
-	 */
 	sre->sre_flags = 0;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_length: sizeof (__u32)
-	 *
-	 * This field is the total length of the notification data,
-	 * including the notification header.
-	 */
 	sre->sre_length = skb->len;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_error: 16 bits (unsigned integer)
-	 * This value represents one of the Operational Error causes defined in
-	 * the SCTP specification, in network byte order.
-	 */
 	sre->sre_error = cause;
-
-	/* Socket Extensions for SCTP
-	 * 5.3.1.3 SCTP_REMOTE_ERROR
-	 *
-	 * sre_assoc_id: sizeof (sctp_assoc_t)
-	 *
-	 * The association id field, holds the identifier for the association.
-	 * All notifications for a given association have the same association
-	 * identifier.  For TCP style socket, this field is ignored.
-	 */
 	sctp_ulpevent_set_owner(event, asoc);
 	sre->sre_assoc_id = sctp_assoc2id(asoc);
 
 	return event;
-
fail:
 	return NULL;
 }
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
 	return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr. */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 				   struct msghdr *msghdr)
 {
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 	if (sctp_ulpevent_is_notification(event))
 		return;
 
-	/* Sockets API Extensions for SCTP
-	 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-	 *
-	 * sinfo_stream: 16 bits (unsigned integer)
-	 *
-	 * For recvmsg() the SCTP stack places the message's stream number in
-	 * this value.
-	 */
+	memset(&sinfo, 0, sizeof(sinfo));
 	sinfo.sinfo_stream = event->stream;
-	/* sinfo_ssn: 16 bits (unsigned integer)
-	 *
-	 * For recvmsg() this value contains the stream sequence number that
-	 * the remote endpoint placed in the DATA chunk.  For fragmented
-	 * messages this is the same number for all deliveries of the message
-	 * (if more than one recvmsg() is needed to read the message).
-	 */
 	sinfo.sinfo_ssn = event->ssn;
-	/* sinfo_ppid: 32 bits (unsigned integer)
-	 *
-	 * In recvmsg() this value is
-	 * the same information that was passed by the upper layer in the peer
-	 * application.  Please note that byte order issues are NOT accounted
-	 * for and this information is passed opaquely by the SCTP stack from
-	 * one end to the other.
-	 */
 	sinfo.sinfo_ppid = event->ppid;
-	/* sinfo_flags: 16 bits (unsigned integer)
-	 *
-	 * This field may contain any of the following flags and is composed of
-	 * a bitwise OR of these values.
-	 *
-	 * recvmsg() flags:
-	 *
-	 * SCTP_UNORDERED - This flag is present when the message was sent
-	 *                  non-ordered.
-	 */
 	sinfo.sinfo_flags = event->flags;
-	/* sinfo_tsn: 32 bit (unsigned integer)
-	 *
-	 * For the receiving side, this field holds a TSN that was
-	 * assigned to one of the SCTP Data Chunks.
-	 */
 	sinfo.sinfo_tsn = event->tsn;
-	/* sinfo_cumtsn: 32 bit (unsigned integer)
-	 *
-	 * This field will hold the current cumulative TSN as
-	 * known by the underlying SCTP layer.  Note this field is
-	 * ignored when sending and only valid for a receive
-	 * operation when sinfo_flags are set to SCTP_UNORDERED.
-	 */
 	sinfo.sinfo_cumtsn = event->cumtsn;
-	/* sinfo_assoc_id: sizeof (sctp_assoc_t)
-	 *
-	 * The association handle field, sinfo_assoc_id, holds the identifier
-	 * for the association announced in the COMMUNICATION_UP notification.
-	 * All notifications for a given association have the same identifier.
-	 * Ignored for one-to-one style sockets.
-	 */
 	sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-	/* context value that is set via SCTP_CONTEXT socket option. */
+	/* Context value that is set via SCTP_CONTEXT socket option. */
 	sinfo.sinfo_context = event->asoc->default_rcv_context;
-	/* These fields are not used while receiving. */
 	sinfo.sinfo_timetolive = 0;
 
 	put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-		 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+		 sizeof(sinfo), &sinfo);
 }
 
 /* Do accounting for bytes received and hold a reference to the association
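A standalone sketch of the pattern behind the memset(sre, 0, ...) and memset(&sinfo, 0, ...) additions in the sctp hunks above: a struct that is copied out to another domain (userspace, the wire) must be fully zeroed first, or padding and forgotten fields leak stale memory. The types below are invented:

    #include <stdio.h>
    #include <string.h>

    struct notification {
        unsigned short type;
        unsigned short flags;
        unsigned int length;
    };

    static void fill(struct notification *n, int scrub)
    {
        if (scrub)
            memset(n, 0, sizeof(*n));   /* no stale bytes can escape */
        n->type = 1;
        n->length = sizeof(*n);
        /* n->flags deliberately left untouched, standing in for a field
         * that a later revision of the code forgets to set. */
    }

    int main(void)
    {
        struct notification n;

        memset(&n, 0xAA, sizeof(n));    /* simulate dirty stack memory */
        fill(&n, 0);
        printf("without memset, flags = 0x%x\n", n.flags);   /* 0xaaaa */
        fill(&n, 1);
        printf("with memset, flags = 0x%x\n", n.flags);      /* 0x0 */
        return 0;
    }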
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
 
 			buf = node->bclink.deferred_head;
 			node->bclink.deferred_head = buf->next;
+			buf->next = NULL;
 			node->bclink.deferred_size--;
 			goto receive;
 		}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 }
 
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
+ *            out: set on successful but incomplete reassembly, otherwise NULL
+ * @*buf:     in:  the buffer to append; always defined
+ *            out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly is complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 			goto out_free;
 		head = *headbuf = frag;
 		skb_frag_list_init(head);
+		*buf = NULL;
 		return 0;
 	}
 	if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
out_free:
 	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
+	kfree_skb(*headbuf);
+	*buf = *headbuf = NULL;
 	return 0;
 }
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
 	if (end >= start)
 		return jiffies_to_msecs(end - start);
 
-	return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+	return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
 }
 
 void
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
 		}
 		CMD(start_p2p_device, START_P2P_DEVICE);
 		CMD(set_mcast_rate, SET_MCAST_RATE);
+#ifdef CONFIG_NL80211_TESTMODE
+		CMD(testmode_cmd, TESTMODE);
+#endif
 		if (state->split) {
 			CMD(crit_proto_start, CRIT_PROTOCOL_START);
 			CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
 			if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
 				CMD(channel_switch, CHANNEL_SWITCH);
+			CMD(set_qos_map, SET_QOS_MAP);
 		}
-		CMD(set_qos_map, SET_QOS_MAP);
-
-#ifdef CONFIG_NL80211_TESTMODE
-		CMD(testmode_cmd, TESTMODE);
-#endif
-
+		/* add into the if now */
#undef CMD
 
 		if (rdev->ops->connect || rdev->ops->auth) {
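A userspace sketch of the elapsed_jiffies_msecs() fix in the core.h hunk above: once a free-running unsigned counter has wrapped, the elapsed count is end + (ULONG_MAX - start) + 1 — the distance up to the wrap point plus the distance past it — so substituting any smaller constant (such as the old MAX_JIFFY_OFFSET) undercounts:

    #include <limits.h>
    #include <stdio.h>

    static unsigned long elapsed(unsigned long start, unsigned long end)
    {
        if (end >= start)
            return end - start;
        return end + (ULONG_MAX - start) + 1;   /* counter wrapped */
    }

    int main(void)
    {
        unsigned long start = ULONG_MAX - 5;    /* just before the wrap */
        unsigned long end = 10;                 /* shortly after it */

        printf("elapsed: %lu ticks\n", elapsed(start, end));   /* 16 */
        return 0;
    }

(Plain unsigned subtraction end - start yields the same bit pattern by modular arithmetic; the explicit form documents the wrap case and mirrors the kernel code.)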
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
 		if (!band_rule_found)
 			band_rule_found = freq_in_rule_band(fr, center_freq);
 
-		bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
+		bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
 
 		if (band_rule_found && bw_fits)
 			return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 }
#endif
 
-/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
- * chan->center_freq fits there.
- * If there is no such reg_rule, disable the channel, otherwise set the
- * flags corresponding to the bandwidths allowed in the particular reg_rule
+/*
+ * Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
 */
static void handle_channel(struct wiphy *wiphy,
			   enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-		bw_flags = IEEE80211_CHAN_NO_10MHZ;
-	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags |= IEEE80211_CHAN_NO_HT40;
+		bw_flags = IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-		bw_flags = IEEE80211_CHAN_NO_10MHZ;
-	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags |= IEEE80211_CHAN_NO_HT40;
+		bw_flags = IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
 	struct special_params *params = bebob->maudio_special_quirk;
 	int err, id;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
 	if (id >= ARRAY_SIZE(special_clk_labels))
-		return 0;
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);
 
 	err = avc_maudio_set_special_clk(bebob, id,
 					 params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
 					 params->clk_lock);
 	mutex_unlock(&bebob->mutex);
 
-	return err >= 0;
+	if (err >= 0)
+		err = 1;
+
+	return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
 	.name	= "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
 	.get	= special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
 	"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
 	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
 	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
 	if (einf->value.enumerated.item >= einf->value.enumerated.items)
 		einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
 	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item]);
+	       special_dig_in_iface_labels[einf->value.enumerated.item]);
 
 	return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
 	unsigned int id, dig_in_fmt, dig_in_iface;
 	int err;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+		return -EINVAL;
 
 	/* decode user value */
 	dig_in_fmt = (id >> 1) & 0x01;
 	dig_in_iface = id & 0x01;
 
+	mutex_lock(&bebob->mutex);
+
 	err = avc_maudio_set_special_clk(bebob,
					 params->clk_src,
					 dig_in_fmt,
					 params->dig_out_fmt,
					 params->clk_lock);
-	if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+	if (err < 0)
+		goto end;
+
+	/* For ADAT, only the optical interface is available. */
+	if (params->dig_in_fmt > 0) {
+		err = 1;
 		goto end;
+	}
 
+	/* For S/PDIF, optical/coaxial interfaces are selectable. */
 	err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
 	if (err < 0)
 		dev_err(&bebob->unit->device,
 			"fail to set digital input interface: %d\n", err);
+	err = 1;
end:
 	special_stream_formation_set(bebob);
 	mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
 	.put	= special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+	"S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
					   struct snd_ctl_elem_info *einf)
 {
 	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
 	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
 	if (einf->value.enumerated.item >= einf->value.enumerated.items)
 		einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
 	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item + 1]);
+	       special_dig_out_iface_labels[einf->value.enumerated.item]);
 
 	return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
 	unsigned int id;
 	int err;
 
-	mutex_lock(&bebob->mutex);
-
 	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);
 
 	err = avc_maudio_set_special_clk(bebob, params->clk_src,
					 params->dig_in_fmt, id,
					 params->clk_lock);
-	if (err >= 0)
+	if (err >= 0) {
 		special_stream_formation_set(bebob);
+		err = 1;
+	}
 	mutex_unlock(&bebob->mutex);
 
 	return err;
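A userspace sketch of the control-put convention the bebob hunks above converge on: validate untrusted input before taking any lock, fail with a negative error code, and return 1 rather than 0 when a value was actually applied, so the caller knows a change notification is due. The pthread mutex below stands in for the device lock; everything else is invented:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define N_CHOICES 3

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int current_choice;

    static int ctl_put(unsigned int id)
    {
        if (id >= N_CHOICES)      /* reject before locking anything */
            return -EINVAL;

        pthread_mutex_lock(&lock);
        current_choice = id;      /* stand-in for programming hardware */
        pthread_mutex_unlock(&lock);

        return 1;                 /* value changed */
    }

    int main(void)
    {
        printf("put(7) -> %d\n", ctl_put(7));   /* -EINVAL */
        printf("put(1) -> %d\n", ctl_put(1));   /* 1 */
        return 0;
    }

Returning early on bad input also avoids the old pattern of grabbing the mutex and leaving without a meaningful result.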
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbddbd801..6df04d91c93c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
 				dsp_unlock(azx_dev);
 				return azx_dev;
 			}
-			if (!res)
+			if (!res ||
+			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
 				res = azx_dev;
 		}
 	dsp_unlock(azx_dev);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b6b4e71a0b0b..83cd19017cf3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -227,7 +227,7 @@ enum {
 /* quirks for Intel PCH */
#define AZX_DCAPS_INTEL_PCH_NOPM \
 	(AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-	 AZX_DCAPS_COUNT_LPIB_DELAY)
+	 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
 
#define AZX_DCAPS_INTEL_PCH \
 	(AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -596,7 +596,7 @@ static int azx_suspend(struct device *dev)
 	struct azx *chip = card->private_data;
 	struct azx_pcm *p;
 
-	if (chip->disabled)
+	if (chip->disabled || chip->init_failed)
 		return 0;
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -628,7 +628,7 @@ static int azx_resume(struct device *dev)
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 
-	if (chip->disabled)
+	if (chip->disabled || chip->init_failed)
 		return 0;
 
 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
@@ -665,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 
-	if (chip->disabled)
+	if (chip->disabled || chip->init_failed)
 		return 0;
 
 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -692,7 +692,7 @@ static int azx_runtime_resume(struct device *dev)
 	struct hda_codec *codec;
 	int status;
 
-	if (chip->disabled)
+	if (chip->disabled || chip->init_failed)
 		return 0;
 
 	if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -729,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
 	struct snd_card *card = dev_get_drvdata(dev);
 	struct azx *chip = card->private_data;
 
-	if (chip->disabled)
+	if (chip->disabled || chip->init_failed)
 		return 0;
 
 	if (!power_save_controller ||
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01fa912..e9d1a5762a55 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_BUFSIZE	(1 << 21)	/* no buffer size alignment */
#define AZX_DCAPS_ALIGN_BUFSIZE	(1 << 22)	/* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)	/* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24)	/* Assign devices in reverse order */
#define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)	/* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME	(1 << 26)	/* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27)	/* HSW i915 powerwell support */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9293a8..358414da6418 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@ disable_hda:
 	return rc;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void hda_tegra_disable_clocks(struct hda_tegra *data)
 {
 	clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
 	clk_disable_unprepare(data->hda_clk);
 }
 
-#ifdef CONFIG_PM_SLEEP
 /*
 * power management
 */
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 4fe876b65fda..ba4ca52072ff 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",	.patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",	.patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",	.patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP",	.patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",	.patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",	.patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
 
 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_mutex_lock(&lock->mutex);
 }
 
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
 
 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
 }
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
 
 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_rdlock(&lock->rwlock);
 }
 
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
 
 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_wrlock(&lock->rwlock);
 }
 
 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
 }
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
 static void init_preload(void);
 static void try_init_preload(void)
 {
-	if (!__init_state != done)
+	if (__init_state != done)
 		init_preload();
 }
 
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
 
 	try_init_preload();
 
-	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
 			(unsigned long)_RET_IP_);
 	/*
 	 * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
 
 	try_init_preload();
 
-	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_mutex_trylock(mutex);
 	if (r)
 		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
 	 */
 	r = ll_pthread_mutex_unlock(mutex);
 	if (r)
-		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
 	return r;
 }
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_rdlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_tryrdlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_trywrlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_wrlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 	lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_unlock(rwlock);
 	if (r)
-		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
 	return r;
 }
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
 	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif
 
-	printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
 	lockdep_init();
 	__init_state = done;
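A standalone demo of the operator-precedence bug the try_init_preload() hunk above fixes: '!' binds tighter than '!=', so the old test compared the boolean !state against the enum value done instead of testing the state itself:

    #include <stdio.h>

    enum { none, prepare, done } state = done;

    int main(void)
    {
        /* buggy: !done evaluates to 0, and 0 != done (2) is true, so
         * this fires even though initialization already completed */
        if (!state != done)
            printf("buggy check: would re-initialize\n");

        /* fixed: compare the state itself */
        if (state != done)
            printf("fixed check: would re-initialize\n");
        else
            printf("fixed check: already initialized\n");
        return 0;
    }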
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fbbba17..04a229aa5c0f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
#include "../util.h"
#include "../ui.h"
#include "map.h"
+#include "annotate.h"
 
 struct hist_browser {
 	struct ui_browser   b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 				 bi->to.sym->name) > 0)
 				annotate_t = nr_options++;
 		} else {
 			if (browser->selection != NULL &&
 			    browser->selection->sym != NULL &&
-			    !browser->selection->map->dso->annotate_warned &&
-			    asprintf(&options[nr_options], "Annotate %s",
-				     browser->selection->sym->name) > 0)
-				annotate = nr_options++;
+			    !browser->selection->map->dso->annotate_warned) {
+				struct annotation *notes;
+
+				notes = symbol__annotation(browser->selection->sym);
+
+				if (notes->src &&
+				    asprintf(&options[nr_options], "Annotate %s",
+					     browser->selection->sym->name) > 0)
+					annotate = nr_options++;
+			}
 		}
 
 		if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
 
 	if (choice == annotate || choice == annotate_t || choice == annotate_f) {
 		struct hist_entry *he;
+		struct annotation *notes;
 		int err;
do_annotate:
 		if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
 			he->ms.map = he->branch_info->to.map;
 		}
 
+		notes = symbol__annotation(he->ms.sym);
+		if (!notes->src)
+			continue;
+
 		/*
 		 * Don't let this be freed, say, by hists__decay_entry.
 		 */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
 	u64 start;
 };
 
-static int symbol__in_kernel(void *arg, const char *name,
-			     char type __maybe_unused, u64 start)
-{
-	struct process_args *args = arg;
-
-	if (strchr(name, '['))
-		return 0;
-
-	args->start = start;
-	return 1;
-}
-
 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					    size_t bufsz)
 {
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
 		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 }
 
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL as
+ * symbol_name if it's not that important.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+					  const char **symbol_name)
 {
 	char filename[PATH_MAX];
-	struct process_args args;
+	int i;
+	const char *name;
+	u64 addr = 0;
 
 	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return 0;
 
-	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
-		return 0;
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr = kallsyms__get_function_start(filename, name);
+		if (addr)
+			break;
+	}
+
+	if (symbol_name)
+		*symbol_name = name;
 
-	return args.start;
+	return addr;
 }
 
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
 	enum map_type type;
-	u64 start = machine__get_kernel_start_addr(machine);
+	u64 start = machine__get_kernel_start_addr(machine, NULL);
 
 	for (type = 0; type < MAP__NR_TYPES; ++type) {
 		struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
 	return 0;
 }
 
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
-	char filename[PATH_MAX];
 	const char *name;
-	u64 addr = 0;
-	int i;
-
-	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
-	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
-		addr = kallsyms__get_function_start(filename, name);
-		if (addr)
-			break;
-	}
+	u64 addr = machine__get_kernel_start_addr(machine, &name);
 
 	if (!addr)
		return -1;